/*	$NetBSD: if_wm.c,v 1.793 2024/01/18 03:16:44 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.793 2024/01/18 03:16:44 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK)
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
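
/*
 * Usage sketch (illustrative only): the third argument is a fully
 * parenthesized printf() argument list, so a call looks like
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 *
 * and compiles away to __nothing when WM_DEBUG is not defined.
 */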

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
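
/*
 * Note (illustrative): the ring and job counts must be powers of two so
 * the index macros can wrap with a mask instead of a modulo, e.g. with
 * txq_ndesc == 4096, WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0.
 */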

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  A full-sized jumbo packet consumes 5 Rx buffers.
 * We allocate 256 receive descriptors, each with a 2k buffer
 * (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
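
/*
 * Worked example (illustrative): with MCLBYTES == 2048, a 9018-byte
 * jumbo frame needs howmany(9018, 2048) == 5 clusters, and
 * 256 descriptors / 5 clusters per frame is roughly 50 frames of
 * buffering.
 */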

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
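
/*
 * Illustrative: WM_CDTXOFF()/WM_CDRXOFF() turn a descriptor index into a
 * byte offset within the control-data area using the per-queue descriptor
 * size, e.g. with 16-byte legacy descriptors, descriptor 10 lives at byte
 * offset 160.
 */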

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
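
/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the "##" inside the string literal is not token-pasted; it only sizes
 * the buffer), and WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, ...) formats
 * the counter name as "txq00txdw".
 */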

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for the previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
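
/*
 * Typical call pattern for the ops vectors above (a sketch, not a quote
 * from this file): acquire the hardware semaphore, do locked accesses,
 * then release.
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	sc->phy.readreg_locked(sc->sc_dev, phy, reg, &val);
 *	sc->phy.release(sc);
 *
 * acquire() is marked warn_unused_result so callers cannot silently
 * ignore a failed semaphore acquisition.
 */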

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	u_int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. Cause" and
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */
	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
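
/*
 * Illustrative: these macros maintain a classic tail-pointer mbuf chain.
 * After WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head; each
 * WM_RXCHAIN_LINK(rxq, m) stores m through rxq_tailp (an O(1) append)
 * and advances rxq_tailp to &m->m_next, so linking m1 then m2 yields
 * rxq_head == m1 and m1->m_next == m2.
 */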

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_STORE(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count), (val))
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_STORE(ev, val)						\
	((ev)->ev_count = (val))
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
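
/*
 * Note (design sketch): the load/store pair above is a relaxed
 * read-modify-write, not an atomic increment, so concurrent updaters
 * could lose counts.  Updates are expected to happen under the
 * per-queue or core lock; the relaxed atomics mainly keep readers
 * (e.g. the stats code) from observing torn 64-bit values.
 */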

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_STORE(qname, evname, val)		\
	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	__nothing
#define	WM_EVCNT_STORE(ev, val)	__nothing
#define	WM_EVCNT_ADD(ev, val)	__nothing

#define WM_Q_EVCNT_INCR(qname, evname)		__nothing
#define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
#define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
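
/*
 * Usage note (illustrative): CSR_WRITE_FLUSH() forces posted PCI writes
 * to reach the device by reading a harmless register (STATUS), e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * before a delay that assumes the write has actually landed.
 */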

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
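
/*
 * Illustrative: the _LO/_HI macros split a descriptor's 64-bit DMA
 * address into the two 32-bit halves the hardware expects when the
 * descriptor base-address register pairs are programmed; on a platform
 * with a 32-bit bus_addr_t the high half is simply 0.
 */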

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_update_stats(struct wm_softc *);
static void	wm_clear_evcnt(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with/without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
   1295 	  WM_T_82546_3,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1298 	  "Intel i82541EI 1000BASE-T Ethernet",
   1299 	  WM_T_82541,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1303 	  WM_T_82541,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1307 	  WM_T_82541,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1310 	  "Intel i82541ER 1000BASE-T Ethernet",
   1311 	  WM_T_82541_2,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1314 	  "Intel i82541GI 1000BASE-T Ethernet",
   1315 	  WM_T_82541_2,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1319 	  WM_T_82541_2,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1322 	  "Intel i82541PI 1000BASE-T Ethernet",
   1323 	  WM_T_82541_2,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1326 	  "Intel i82547EI 1000BASE-T Ethernet",
   1327 	  WM_T_82547,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1331 	  WM_T_82547,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1334 	  "Intel i82547GI 1000BASE-T Ethernet",
   1335 	  WM_T_82547_2,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1338 	  "Intel PRO/1000 PT (82571EB)",
   1339 	  WM_T_82571,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1342 	  "Intel PRO/1000 PF (82571EB)",
   1343 	  WM_T_82571,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1346 	  "Intel PRO/1000 PB (82571EB)",
   1347 	  WM_T_82571,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1350 	  "Intel PRO/1000 QT (82571EB)",
   1351 	  WM_T_82571,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1355 	  WM_T_82571,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1359 	  WM_T_82571,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1363 	  WM_T_82571,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82571,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1371 	  WM_T_82571,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1374 	  "Intel i82572EI 1000baseT Ethernet",
   1375 	  WM_T_82572,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1378 	  "Intel i82572EI 1000baseX Ethernet",
   1379 	  WM_T_82572,		WMP_F_FIBER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1383 	  WM_T_82572,		WMP_F_SERDES },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1386 	  "Intel i82572EI 1000baseT Ethernet",
   1387 	  WM_T_82572,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1390 	  "Intel i82573E",
   1391 	  WM_T_82573,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1394 	  "Intel i82573E IAMT",
   1395 	  WM_T_82573,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1398 	  "Intel i82573L Gigabit Ethernet",
   1399 	  WM_T_82573,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1402 	  "Intel i82574L",
   1403 	  WM_T_82574,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1406 	  "Intel i82574L",
   1407 	  WM_T_82574,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1410 	  "Intel i82583V",
   1411 	  WM_T_82583,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1414 	  "i80003 dual 1000baseT Ethernet",
   1415 	  WM_T_80003,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1418 	  "i80003 dual 1000baseX Ethernet",
   1419 	  WM_T_80003,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1423 	  WM_T_80003,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1426 	  "Intel i80003 1000baseT Ethernet",
   1427 	  WM_T_80003,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_80003,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1434 	  "Intel i82801H (M_AMT) LAN Controller",
   1435 	  WM_T_ICH8,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1437 	  "Intel i82801H (AMT) LAN Controller",
   1438 	  WM_T_ICH8,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1440 	  "Intel i82801H LAN Controller",
   1441 	  WM_T_ICH8,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1444 	  WM_T_ICH8,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1446 	  "Intel i82801H (M) LAN Controller",
   1447 	  WM_T_ICH8,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1450 	  WM_T_ICH8,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1453 	  WM_T_ICH8,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1455 	  "82567V-3 LAN Controller",
   1456 	  WM_T_ICH8,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1458 	  "82801I (AMT) LAN Controller",
   1459 	  WM_T_ICH9,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1461 	  "82801I 10/100 LAN Controller",
   1462 	  WM_T_ICH9,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1464 	  "82801I (G) 10/100 LAN Controller",
   1465 	  WM_T_ICH9,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1467 	  "82801I (GT) 10/100 LAN Controller",
   1468 	  WM_T_ICH9,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1470 	  "82801I (C) LAN Controller",
   1471 	  WM_T_ICH9,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1473 	  "82801I mobile LAN Controller",
   1474 	  WM_T_ICH9,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1476 	  "82801I mobile (V) LAN Controller",
   1477 	  WM_T_ICH9,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1479 	  "82801I mobile (AMT) LAN Controller",
   1480 	  WM_T_ICH9,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1482 	  "82567LM-4 LAN Controller",
   1483 	  WM_T_ICH9,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1485 	  "82567LM-2 LAN Controller",
   1486 	  WM_T_ICH10,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1488 	  "82567LF-2 LAN Controller",
   1489 	  WM_T_ICH10,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1491 	  "82567LM-3 LAN Controller",
   1492 	  WM_T_ICH10,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1494 	  "82567LF-3 LAN Controller",
   1495 	  WM_T_ICH10,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1497 	  "82567V-2 LAN Controller",
   1498 	  WM_T_ICH10,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1500 	  "82567V-3? LAN Controller",
   1501 	  WM_T_ICH10,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1503 	  "HANKSVILLE LAN Controller",
   1504 	  WM_T_ICH10,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1506 	  "PCH LAN (82577LM) Controller",
   1507 	  WM_T_PCH,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1509 	  "PCH LAN (82577LC) Controller",
   1510 	  WM_T_PCH,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1512 	  "PCH LAN (82578DM) Controller",
   1513 	  WM_T_PCH,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1515 	  "PCH LAN (82578DC) Controller",
   1516 	  WM_T_PCH,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1518 	  "PCH2 LAN (82579LM) Controller",
   1519 	  WM_T_PCH2,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1521 	  "PCH2 LAN (82579V) Controller",
   1522 	  WM_T_PCH2,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1524 	  "82575EB dual-1000baseT Ethernet",
   1525 	  WM_T_82575,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1528 	  WM_T_82575,		WMP_F_SERDES },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1530 	  "82575GB quad-1000baseT Ethernet",
   1531 	  WM_T_82575,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1533 	  "82575GB quad-1000baseT Ethernet (PM)",
   1534 	  WM_T_82575,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1536 	  "82576 1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1539 	  "82576 1000BaseX Ethernet",
   1540 	  WM_T_82576,		WMP_F_FIBER },
   1541 
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1543 	  "82576 gigabit Ethernet (SERDES)",
   1544 	  WM_T_82576,		WMP_F_SERDES },
   1545 
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1547 	  "82576 quad-1000BaseT Ethernet",
   1548 	  WM_T_82576,		WMP_F_COPPER },
   1549 
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1552 	  WM_T_82576,		WMP_F_COPPER },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1555 	  "82576 gigabit Ethernet",
   1556 	  WM_T_82576,		WMP_F_COPPER },
   1557 
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1559 	  "82576 gigabit Ethernet (SERDES)",
   1560 	  WM_T_82576,		WMP_F_SERDES },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1562 	  "82576 quad-gigabit Ethernet (SERDES)",
   1563 	  WM_T_82576,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1566 	  "82580 1000BaseT Ethernet",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1569 	  "82580 1000BaseX Ethernet",
   1570 	  WM_T_82580,		WMP_F_FIBER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1573 	  "82580 1000BaseT Ethernet (SERDES)",
   1574 	  WM_T_82580,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1577 	  "82580 gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1580 	  "82580 dual-1000BaseT Ethernet",
   1581 	  WM_T_82580,		WMP_F_COPPER },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1584 	  "82580 quad-1000BaseX Ethernet",
   1585 	  WM_T_82580,		WMP_F_FIBER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1589 	  WM_T_82580,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1593 	  WM_T_82580,		WMP_F_SERDES },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1596 	  "DH89XXCC 1000BASE-KX Ethernet",
   1597 	  WM_T_82580,		WMP_F_SERDES },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1601 	  WM_T_82580,		WMP_F_SERDES },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1604 	  "I350 Gigabit Network Connection",
   1605 	  WM_T_I350,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1608 	  "I350 Gigabit Fiber Network Connection",
   1609 	  WM_T_I350,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1612 	  "I350 Gigabit Backplane Connection",
   1613 	  WM_T_I350,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1616 	  "I350 Quad Port Gigabit Ethernet",
   1617 	  WM_T_I350,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1620 	  "I350 Gigabit Connection",
   1621 	  WM_T_I350,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1624 	  "I354 Gigabit Ethernet (KX)",
   1625 	  WM_T_I354,		WMP_F_SERDES },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1628 	  "I354 Gigabit Ethernet (SGMII)",
   1629 	  WM_T_I354,		WMP_F_COPPER },
   1630 
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1632 	  "I354 Gigabit Ethernet (2.5G)",
   1633 	  WM_T_I354,		WMP_F_COPPER },
   1634 
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1636 	  "I210-T1 Ethernet Server Adapter",
   1637 	  WM_T_I210,		WMP_F_COPPER },
   1638 
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1640 	  "I210 Ethernet (Copper OEM)",
   1641 	  WM_T_I210,		WMP_F_COPPER },
   1642 
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1644 	  "I210 Ethernet (Copper IT)",
   1645 	  WM_T_I210,		WMP_F_COPPER },
   1646 
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1648 	  "I210 Ethernet (Copper, FLASH less)",
   1649 	  WM_T_I210,		WMP_F_COPPER },
   1650 
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1652 	  "I210 Gigabit Ethernet (Fiber)",
   1653 	  WM_T_I210,		WMP_F_FIBER },
   1654 
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1656 	  "I210 Gigabit Ethernet (SERDES)",
   1657 	  WM_T_I210,		WMP_F_SERDES },
   1658 
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1661 	  WM_T_I210,		WMP_F_SERDES },
   1662 
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1664 	  "I210 Gigabit Ethernet (SGMII)",
   1665 	  WM_T_I210,		WMP_F_COPPER },
   1666 
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1669 	  WM_T_I210,		WMP_F_COPPER },
   1670 
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1672 	  "I211 Ethernet (COPPER)",
   1673 	  WM_T_I211,		WMP_F_COPPER },
   1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1675 	  "I217 V Ethernet Connection",
   1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1678 	  "I217 LM Ethernet Connection",
   1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1681 	  "I218 V Ethernet Connection",
   1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1684 	  "I218 V Ethernet Connection",
   1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1687 	  "I218 V Ethernet Connection",
   1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1690 	  "I218 LM Ethernet Connection",
   1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1693 	  "I218 LM Ethernet Connection",
   1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1696 	  "I218 LM Ethernet Connection",
   1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1699 	  "I219 LM Ethernet Connection",
   1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1702 	  "I219 LM (2) Ethernet Connection",
   1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1705 	  "I219 LM (3) Ethernet Connection",
   1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1708 	  "I219 LM (4) Ethernet Connection",
   1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1711 	  "I219 LM (5) Ethernet Connection",
   1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1714 	  "I219 LM (6) Ethernet Connection",
   1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1717 	  "I219 LM (7) Ethernet Connection",
   1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1720 	  "I219 LM (8) Ethernet Connection",
   1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1723 	  "I219 LM (9) Ethernet Connection",
   1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1726 	  "I219 LM (10) Ethernet Connection",
   1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1729 	  "I219 LM (11) Ethernet Connection",
   1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1732 	  "I219 LM (12) Ethernet Connection",
   1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1735 	  "I219 LM (13) Ethernet Connection",
   1736 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1738 	  "I219 LM (14) Ethernet Connection",
   1739 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1741 	  "I219 LM (15) Ethernet Connection",
   1742 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1744 	  "I219 LM (16) Ethernet Connection",
   1745 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1747 	  "I219 LM (17) Ethernet Connection",
   1748 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1750 	  "I219 LM (18) Ethernet Connection",
   1751 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1753 	  "I219 LM (19) Ethernet Connection",
   1754 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM20,
   1756 	  "I219 LM (20) Ethernet Connection",
   1757 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM21,
   1759 	  "I219 LM (21) Ethernet Connection",
   1760 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM22,
   1762 	  "I219 LM (22) Ethernet Connection",
   1763 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM23,
   1765 	  "I219 LM (23) Ethernet Connection",
   1766 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1768 	  "I219 V Ethernet Connection",
   1769 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1771 	  "I219 V (2) Ethernet Connection",
   1772 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1774 	  "I219 V (4) Ethernet Connection",
   1775 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1777 	  "I219 V (5) Ethernet Connection",
   1778 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1780 	  "I219 V (6) Ethernet Connection",
   1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1783 	  "I219 V (7) Ethernet Connection",
   1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1786 	  "I219 V (8) Ethernet Connection",
   1787 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1789 	  "I219 V (9) Ethernet Connection",
   1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1792 	  "I219 V (10) Ethernet Connection",
   1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1795 	  "I219 V (11) Ethernet Connection",
   1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1798 	  "I219 V (12) Ethernet Connection",
   1799 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1801 	  "I219 V (13) Ethernet Connection",
   1802 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1804 	  "I219 V (14) Ethernet Connection",
   1805 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1807 	  "I219 V (15) Ethernet Connection",
   1808 	  WM_T_PCH_TGP,		WMP_F_COPPER },
   1809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1810 	  "I219 V (16) Ethernet Connection",
   1811 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1813 	  "I219 V (17) Ethernet Connection",
   1814 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
   1815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1816 	  "I219 V (18) Ethernet Connection",
   1817 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1819 	  "I219 V (19) Ethernet Connection",
   1820 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V20,
   1822 	  "I219 V (20) Ethernet Connection",
   1823 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V21,
   1825 	  "I219 V (21) Ethernet Connection",
   1826 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
   1827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V22,
   1828 	  "I219 V (22) Ethernet Connection",
   1829 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V23,
   1831 	  "I219 V (23) Ethernet Connection",
   1832 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
   1833 	{ 0,			0,
   1834 	  NULL,
   1835 	  0,			0 },
   1836 };
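         /*
          * Illustrative example (not from the original source): wm_attach()
          * derives the media type with WMP_MEDIATYPE(wmp->wmp_flags), which
          * keeps only the low two bits, so further WMP_F_* flags could share
          * the word with the media type.
          */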
   1837 
   1838 /*
   1839  * Register read/write functions.
   1840  * Other than CSR_{READ|WRITE}().
   1841  */
   1842 
   1843 #if 0 /* Not currently used */
   1844 static inline uint32_t
   1845 wm_io_read(struct wm_softc *sc, int reg)
   1846 {
   1847 
   1848 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1849 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1850 }
   1851 #endif
   1852 
   1853 static inline void
   1854 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1855 {
   1856 
   1857 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1858 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1859 }
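         /*
          * Illustrative sketch (hypothetical, not part of the driver): the I/O
          * BAR is a two-register indirect window -- a write to offset 0 latches
          * the CSR address, and offset 4 then reads or writes that CSR's data,
          * as the helpers above show.  A read-modify-write through the window
          * could look like the sketch below; it is kept under #if 0 because
          * wm_io_read() above is also compiled out.
          */
         #if 0
         static inline void
         wm_io_setbit(struct wm_softc *sc, int reg, uint32_t bit)
         {
         	uint32_t val;
         
         	/* Each helper re-latches the register address at offset 0. */
         	val = wm_io_read(sc, reg);
         	wm_io_write(sc, reg, val | bit);
         }
         #endif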
   1860 
   1861 static inline void
   1862 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1863     uint32_t data)
   1864 {
   1865 	uint32_t regval;
   1866 	int i;
   1867 
   1868 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1869 
   1870 	CSR_WRITE(sc, reg, regval);
   1871 
   1872 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1873 		delay(5);
   1874 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1875 			break;
   1876 	}
   1877 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1878 		aprint_error("%s: WARNING:"
   1879 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1880 		    device_xname(sc->sc_dev), reg);
   1881 	}
   1882 }
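         /*
          * Worked example (illustrative; assumes an 8-bit data field and an
          * address field starting at bit 8): writing data 0x40 to 8-bit
          * register offset 0x12 encodes regval = 0x40 | (0x12 << 8) = 0x1240,
          * then polls in 5us steps, up to SCTL_CTL_POLL_TIMEOUT iterations,
          * for the hardware to raise SCTL_CTL_READY.
          */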
   1883 
   1884 static inline void
   1885 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1886 {
   1887 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1888 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1889 }
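         /*
          * Illustrative example: for v = 0x123456780, BUS_ADDR_LO32(v) is
          * 0x23456780 and BUS_ADDR_HI32(v) is 0x1; both halves are stored
          * little-endian, as the descriptor layout requires.
          */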
   1890 
   1891 /*
   1892  * Descriptor sync/init functions.
   1893  */
   1894 static inline void
   1895 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1896 {
   1897 	struct wm_softc *sc = txq->txq_sc;
   1898 
   1899 	/* If it will wrap around, sync to the end of the ring. */
   1900 	if ((start + num) > WM_NTXDESC(txq)) {
   1901 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1902 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1903 		    (WM_NTXDESC(txq) - start), ops);
   1904 		num -= (WM_NTXDESC(txq) - start);
   1905 		start = 0;
   1906 	}
   1907 
   1908 	/* Now sync whatever is left. */
   1909 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1910 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1911 }
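         /*
          * Worked example (illustrative): with WM_NTXDESC(txq) == 256,
          * start == 250 and num == 10, the first bus_dmamap_sync() covers
          * descriptors 250..255; start then becomes 0 and num becomes 4, so
          * the second call covers descriptors 0..3.
          */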
   1912 
   1913 static inline void
   1914 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1915 {
   1916 	struct wm_softc *sc = rxq->rxq_sc;
   1917 
   1918 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1919 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1920 }
   1921 
   1922 static inline void
   1923 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1924 {
   1925 	struct wm_softc *sc = rxq->rxq_sc;
   1926 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1927 	struct mbuf *m = rxs->rxs_mbuf;
   1928 
   1929 	/*
   1930 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1931 	 * so that the payload after the Ethernet header is aligned
   1932 	 * to a 4-byte boundary.
    1933 	 *
   1934 	 * XXX BRAINDAMAGE ALERT!
   1935 	 * The stupid chip uses the same size for every buffer, which
   1936 	 * is set in the Receive Control register.  We are using the 2K
   1937 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1938 	 * reason, we can't "scoot" packets longer than the standard
   1939 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1940 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1941 	 * the upper layer copy the headers.
   1942 	 */
   1943 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1944 
   1945 	if (sc->sc_type == WM_T_82574) {
   1946 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1947 		rxd->erx_data.erxd_addr =
   1948 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1949 		rxd->erx_data.erxd_dd = 0;
   1950 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1951 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1952 
   1953 		rxd->nqrx_data.nrxd_paddr =
   1954 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1955 		/* Currently, split header is not supported. */
   1956 		rxd->nqrx_data.nrxd_haddr = 0;
   1957 	} else {
   1958 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1959 
   1960 		wm_set_dma_addr(&rxd->wrx_addr,
   1961 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1962 		rxd->wrx_len = 0;
   1963 		rxd->wrx_cksum = 0;
   1964 		rxd->wrx_status = 0;
   1965 		rxd->wrx_errors = 0;
   1966 		rxd->wrx_special = 0;
   1967 	}
   1968 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1969 
   1970 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1971 }
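         /*
          * Illustrative example: with sc_align_tweak == 2, the 14-byte
          * Ethernet header starts at buffer offset 2, so the IP header that
          * follows lands at offset 16, a 4-byte boundary.  The tweak costs
          * the last 2 bytes of the buffer, hence the (2K - 2) limit noted in
          * wm_init_rxdesc() above.
          */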
   1972 
   1973 /*
   1974  * Device driver interface functions and commonly used functions.
   1975  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1976  */
   1977 
   1978 /* Lookup supported device table */
   1979 static const struct wm_product *
   1980 wm_lookup(const struct pci_attach_args *pa)
   1981 {
   1982 	const struct wm_product *wmp;
   1983 
   1984 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1985 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1986 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1987 			return wmp;
   1988 	}
   1989 	return NULL;
   1990 }
   1991 
   1992 /* The match function (ca_match) */
   1993 static int
   1994 wm_match(device_t parent, cfdata_t cf, void *aux)
   1995 {
   1996 	struct pci_attach_args *pa = aux;
   1997 
   1998 	if (wm_lookup(pa) != NULL)
   1999 		return 1;
   2000 
   2001 	return 0;
   2002 }
   2003 
   2004 /* The attach function (ca_attach) */
   2005 static void
   2006 wm_attach(device_t parent, device_t self, void *aux)
   2007 {
   2008 	struct wm_softc *sc = device_private(self);
   2009 	struct pci_attach_args *pa = aux;
   2010 	prop_dictionary_t dict;
   2011 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2012 	pci_chipset_tag_t pc = pa->pa_pc;
   2013 	int counts[PCI_INTR_TYPE_SIZE];
   2014 	pci_intr_type_t max_type;
   2015 	const char *eetype, *xname;
   2016 	bus_space_tag_t memt;
   2017 	bus_space_handle_t memh;
   2018 	bus_size_t memsize;
   2019 	int memh_valid;
   2020 	int i, error;
   2021 	const struct wm_product *wmp;
   2022 	prop_data_t ea;
   2023 	prop_number_t pn;
   2024 	uint8_t enaddr[ETHER_ADDR_LEN];
   2025 	char buf[256];
   2026 	char wqname[MAXCOMLEN];
   2027 	uint16_t cfg1, cfg2, swdpin, nvmword;
   2028 	pcireg_t preg, memtype;
   2029 	uint16_t eeprom_data, apme_mask;
   2030 	bool force_clear_smbi;
   2031 	uint32_t link_mode;
   2032 	uint32_t reg;
   2033 
   2034 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2035 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2036 #endif
   2037 	sc->sc_dev = self;
   2038 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2039 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2040 	sc->sc_core_stopping = false;
   2041 
   2042 	wmp = wm_lookup(pa);
   2043 #ifdef DIAGNOSTIC
   2044 	if (wmp == NULL) {
   2045 		printf("\n");
   2046 		panic("wm_attach: impossible");
   2047 	}
   2048 #endif
   2049 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2050 
   2051 	sc->sc_pc = pa->pa_pc;
   2052 	sc->sc_pcitag = pa->pa_tag;
   2053 
   2054 	if (pci_dma64_available(pa)) {
   2055 		aprint_verbose(", 64-bit DMA");
   2056 		sc->sc_dmat = pa->pa_dmat64;
   2057 	} else {
   2058 		aprint_verbose(", 32-bit DMA");
   2059 		sc->sc_dmat = pa->pa_dmat;
   2060 	}
   2061 
   2062 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2063 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2064 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2065 
   2066 	sc->sc_type = wmp->wmp_type;
   2067 
   2068 	/* Set default function pointers */
   2069 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2070 	sc->phy.release = sc->nvm.release = wm_put_null;
   2071 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2072 
   2073 	if (sc->sc_type < WM_T_82543) {
   2074 		if (sc->sc_rev < 2) {
   2075 			aprint_error_dev(sc->sc_dev,
   2076 			    "i82542 must be at least rev. 2\n");
   2077 			return;
   2078 		}
   2079 		if (sc->sc_rev < 3)
   2080 			sc->sc_type = WM_T_82542_2_0;
   2081 	}
   2082 
   2083 	/*
   2084 	 * Disable MSI for Errata:
   2085 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2086 	 *
   2087 	 *  82544: Errata 25
   2088 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2089 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2090 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2091 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2092 	 *
   2093 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2094 	 *
   2095 	 *  82571 & 82572: Errata 63
   2096 	 */
   2097 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2098 	    || (sc->sc_type == WM_T_82572))
   2099 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2100 
   2101 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2102 	    || (sc->sc_type == WM_T_82580)
   2103 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2104 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2105 		sc->sc_flags |= WM_F_NEWQUEUE;
   2106 
   2107 	/* Set device properties (mactype) */
   2108 	dict = device_properties(sc->sc_dev);
   2109 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2110 
   2111 	/*
    2112 	 * Map the device.  All devices support memory-mapped access,
   2113 	 * and it is really required for normal operation.
   2114 	 */
   2115 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2116 	switch (memtype) {
   2117 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2118 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2119 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2120 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2121 		break;
   2122 	default:
   2123 		memh_valid = 0;
   2124 		break;
   2125 	}
   2126 
   2127 	if (memh_valid) {
   2128 		sc->sc_st = memt;
   2129 		sc->sc_sh = memh;
   2130 		sc->sc_ss = memsize;
   2131 	} else {
   2132 		aprint_error_dev(sc->sc_dev,
   2133 		    "unable to map device registers\n");
   2134 		return;
   2135 	}
   2136 
   2137 	/*
   2138 	 * In addition, i82544 and later support I/O mapped indirect
   2139 	 * register access.  It is not desirable (nor supported in
   2140 	 * this driver) to use it for normal operation, though it is
   2141 	 * required to work around bugs in some chip versions.
   2142 	 */
   2143 	switch (sc->sc_type) {
   2144 	case WM_T_82544:
   2145 	case WM_T_82541:
   2146 	case WM_T_82541_2:
   2147 	case WM_T_82547:
   2148 	case WM_T_82547_2:
   2149 		/* First we have to find the I/O BAR. */
   2150 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2151 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2152 			if (memtype == PCI_MAPREG_TYPE_IO)
   2153 				break;
   2154 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2155 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2156 				i += 4;	/* skip high bits, too */
   2157 		}
   2158 		if (i < PCI_MAPREG_END) {
   2159 			/*
    2160 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2161 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    2162 			 * that is no problem, because those newer chips
    2163 			 * don't have this bug anyway.
    2164 			 *
    2165 			 * The i8254x apparently doesn't respond when the
    2166 			 * I/O BAR is 0, which looks as if it has not
    2167 			 * been configured.
   2168 			 */
   2169 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2170 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2171 				aprint_error_dev(sc->sc_dev,
   2172 				    "WARNING: I/O BAR at zero.\n");
   2173 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2174 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2175 			    == 0) {
   2176 				sc->sc_flags |= WM_F_IOH_VALID;
   2177 			} else
   2178 				aprint_error_dev(sc->sc_dev,
   2179 				    "WARNING: unable to map I/O space\n");
   2180 		}
   2181 		break;
   2182 	default:
   2183 		break;
   2184 	}
   2185 
   2186 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2187 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2188 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2189 	if (sc->sc_type < WM_T_82542_2_1)
   2190 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2191 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2192 
   2193 	/* Power up chip */
   2194 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2195 	    && error != EOPNOTSUPP) {
   2196 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2197 		return;
   2198 	}
   2199 
   2200 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2201 	/*
    2202 	 * Don't use MSI-X if we can use only one queue; that saves
    2203 	 * interrupt resources.
   2204 	 */
   2205 	if (sc->sc_nqueues > 1) {
   2206 		max_type = PCI_INTR_TYPE_MSIX;
   2207 		/*
    2208 		 * The 82583 has an MSI-X capability in its PCI configuration
    2209 		 * space but doesn't actually support MSI-X; at least the
    2210 		 * documentation says nothing about it.
   2211 		 */
   2212 		counts[PCI_INTR_TYPE_MSIX]
   2213 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2214 	} else {
   2215 		max_type = PCI_INTR_TYPE_MSI;
   2216 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2217 	}
   2218 
   2219 	/* Allocation settings */
   2220 	counts[PCI_INTR_TYPE_MSI] = 1;
   2221 	counts[PCI_INTR_TYPE_INTX] = 1;
   2222 	/* overridden by disable flags */
   2223 	if (wm_disable_msi != 0) {
   2224 		counts[PCI_INTR_TYPE_MSI] = 0;
   2225 		if (wm_disable_msix != 0) {
   2226 			max_type = PCI_INTR_TYPE_INTX;
   2227 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2228 		}
   2229 	} else if (wm_disable_msix != 0) {
   2230 		max_type = PCI_INTR_TYPE_MSI;
   2231 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2232 	}
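         	/*
         	 * Illustrative example: with wm_disable_msix set and
         	 * wm_disable_msi clear, max_type drops to PCI_INTR_TYPE_MSI and
         	 * MSI-X is never requested; if both are set, only INTx is tried.
         	 */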
   2233 
   2234 alloc_retry:
   2235 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2236 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2237 		return;
   2238 	}
   2239 
   2240 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2241 		error = wm_setup_msix(sc);
   2242 		if (error) {
   2243 			pci_intr_release(pc, sc->sc_intrs,
   2244 			    counts[PCI_INTR_TYPE_MSIX]);
   2245 
   2246 			/* Setup for MSI: Disable MSI-X */
   2247 			max_type = PCI_INTR_TYPE_MSI;
   2248 			counts[PCI_INTR_TYPE_MSI] = 1;
   2249 			counts[PCI_INTR_TYPE_INTX] = 1;
   2250 			goto alloc_retry;
   2251 		}
   2252 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2253 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2254 		error = wm_setup_legacy(sc);
   2255 		if (error) {
   2256 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2257 			    counts[PCI_INTR_TYPE_MSI]);
   2258 
   2259 			/* The next try is for INTx: Disable MSI */
   2260 			max_type = PCI_INTR_TYPE_INTX;
   2261 			counts[PCI_INTR_TYPE_INTX] = 1;
   2262 			goto alloc_retry;
   2263 		}
   2264 	} else {
   2265 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2266 		error = wm_setup_legacy(sc);
   2267 		if (error) {
   2268 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2269 			    counts[PCI_INTR_TYPE_INTX]);
   2270 			return;
   2271 		}
   2272 	}
   2273 
   2274 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2275 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2276 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2277 	    WQ_PERCPU | WQ_MPSAFE);
   2278 	if (error) {
   2279 		aprint_error_dev(sc->sc_dev,
   2280 		    "unable to create TxRx workqueue\n");
   2281 		goto out;
   2282 	}
   2283 
   2284 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2285 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2286 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2287 	    WQ_MPSAFE);
   2288 	if (error) {
   2289 		workqueue_destroy(sc->sc_queue_wq);
   2290 		aprint_error_dev(sc->sc_dev,
   2291 		    "unable to create reset workqueue\n");
   2292 		goto out;
   2293 	}
   2294 
   2295 	/*
   2296 	 * Check the function ID (unit number of the chip).
   2297 	 */
   2298 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2299 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2300 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2301 	    || (sc->sc_type == WM_T_82580)
   2302 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2303 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2304 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2305 	else
   2306 		sc->sc_funcid = 0;
   2307 
   2308 	/*
   2309 	 * Determine a few things about the bus we're connected to.
   2310 	 */
   2311 	if (sc->sc_type < WM_T_82543) {
   2312 		/* We don't really know the bus characteristics here. */
   2313 		sc->sc_bus_speed = 33;
   2314 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2315 		/*
   2316 		 * CSA (Communication Streaming Architecture) is about as fast
    2317 		 * as a 32-bit 66MHz PCI bus.
   2318 		 */
   2319 		sc->sc_flags |= WM_F_CSA;
   2320 		sc->sc_bus_speed = 66;
   2321 		aprint_verbose_dev(sc->sc_dev,
   2322 		    "Communication Streaming Architecture\n");
   2323 		if (sc->sc_type == WM_T_82547) {
   2324 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2325 			callout_setfunc(&sc->sc_txfifo_ch,
   2326 			    wm_82547_txfifo_stall, sc);
   2327 			aprint_verbose_dev(sc->sc_dev,
   2328 			    "using 82547 Tx FIFO stall work-around\n");
   2329 		}
   2330 	} else if (sc->sc_type >= WM_T_82571) {
   2331 		sc->sc_flags |= WM_F_PCIE;
   2332 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2333 		    && (sc->sc_type != WM_T_ICH10)
   2334 		    && (sc->sc_type != WM_T_PCH)
   2335 		    && (sc->sc_type != WM_T_PCH2)
   2336 		    && (sc->sc_type != WM_T_PCH_LPT)
   2337 		    && (sc->sc_type != WM_T_PCH_SPT)
   2338 		    && (sc->sc_type != WM_T_PCH_CNP)
   2339 		    && (sc->sc_type != WM_T_PCH_TGP)) {
   2340 			/* ICH* and PCH* have no PCIe capability registers */
   2341 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2342 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2343 				NULL) == 0)
   2344 				aprint_error_dev(sc->sc_dev,
   2345 				    "unable to find PCIe capability\n");
   2346 		}
   2347 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2348 	} else {
   2349 		reg = CSR_READ(sc, WMREG_STATUS);
   2350 		if (reg & STATUS_BUS64)
   2351 			sc->sc_flags |= WM_F_BUS64;
   2352 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2353 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2354 
   2355 			sc->sc_flags |= WM_F_PCIX;
   2356 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2357 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2358 				aprint_error_dev(sc->sc_dev,
   2359 				    "unable to find PCIX capability\n");
   2360 			else if (sc->sc_type != WM_T_82545_3 &&
   2361 			    sc->sc_type != WM_T_82546_3) {
   2362 				/*
   2363 				 * Work around a problem caused by the BIOS
   2364 				 * setting the max memory read byte count
   2365 				 * incorrectly.
   2366 				 */
   2367 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2368 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2369 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2370 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2371 
   2372 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2373 				    PCIX_CMD_BYTECNT_SHIFT;
   2374 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2375 				    PCIX_STATUS_MAXB_SHIFT;
   2376 				if (bytecnt > maxb) {
   2377 					aprint_verbose_dev(sc->sc_dev,
   2378 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2379 					    512 << bytecnt, 512 << maxb);
   2380 					pcix_cmd = (pcix_cmd &
   2381 					    ~PCIX_CMD_BYTECNT_MASK) |
   2382 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2383 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2384 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2385 					    pcix_cmd);
   2386 				}
   2387 			}
   2388 		}
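         		/*
         		 * Worked example (illustrative): MMRBC is encoded as
         		 * 512 << n, so bytecnt == 3 means 4096 bytes; if maxb is
         		 * 2 (2048 bytes), the command register above is rewritten
         		 * to bring MMRBC down to 2048.
         		 */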
   2389 		/*
   2390 		 * The quad port adapter is special; it has a PCIX-PCIX
   2391 		 * bridge on the board, and can run the secondary bus at
   2392 		 * a higher speed.
   2393 		 */
   2394 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2395 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2396 								      : 66;
   2397 		} else if (sc->sc_flags & WM_F_PCIX) {
   2398 			switch (reg & STATUS_PCIXSPD_MASK) {
   2399 			case STATUS_PCIXSPD_50_66:
   2400 				sc->sc_bus_speed = 66;
   2401 				break;
   2402 			case STATUS_PCIXSPD_66_100:
   2403 				sc->sc_bus_speed = 100;
   2404 				break;
   2405 			case STATUS_PCIXSPD_100_133:
   2406 				sc->sc_bus_speed = 133;
   2407 				break;
   2408 			default:
   2409 				aprint_error_dev(sc->sc_dev,
   2410 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2411 				    reg & STATUS_PCIXSPD_MASK);
   2412 				sc->sc_bus_speed = 66;
   2413 				break;
   2414 			}
   2415 		} else
   2416 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2417 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2418 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2419 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2420 	}
   2421 
    2422 	/* Clear interesting stat counters */
   2423 	CSR_READ(sc, WMREG_COLC);
   2424 	CSR_READ(sc, WMREG_RXERRC);
   2425 
   2426 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2427 	    || (sc->sc_type >= WM_T_ICH8))
   2428 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2429 	if (sc->sc_type >= WM_T_ICH8)
   2430 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2431 
   2432 	/* Set PHY, NVM mutex related stuff */
   2433 	switch (sc->sc_type) {
   2434 	case WM_T_82542_2_0:
   2435 	case WM_T_82542_2_1:
   2436 	case WM_T_82543:
   2437 	case WM_T_82544:
   2438 		/* Microwire */
   2439 		sc->nvm.read = wm_nvm_read_uwire;
   2440 		sc->sc_nvm_wordsize = 64;
   2441 		sc->sc_nvm_addrbits = 6;
   2442 		break;
   2443 	case WM_T_82540:
   2444 	case WM_T_82545:
   2445 	case WM_T_82545_3:
   2446 	case WM_T_82546:
   2447 	case WM_T_82546_3:
   2448 		/* Microwire */
   2449 		sc->nvm.read = wm_nvm_read_uwire;
   2450 		reg = CSR_READ(sc, WMREG_EECD);
   2451 		if (reg & EECD_EE_SIZE) {
   2452 			sc->sc_nvm_wordsize = 256;
   2453 			sc->sc_nvm_addrbits = 8;
   2454 		} else {
   2455 			sc->sc_nvm_wordsize = 64;
   2456 			sc->sc_nvm_addrbits = 6;
   2457 		}
   2458 		sc->sc_flags |= WM_F_LOCK_EECD;
   2459 		sc->nvm.acquire = wm_get_eecd;
   2460 		sc->nvm.release = wm_put_eecd;
   2461 		break;
   2462 	case WM_T_82541:
   2463 	case WM_T_82541_2:
   2464 	case WM_T_82547:
   2465 	case WM_T_82547_2:
   2466 		reg = CSR_READ(sc, WMREG_EECD);
   2467 		/*
    2468 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI bus only
    2469 		 * on the 8254[17], so set the flags and functions before calling it.
   2470 		 */
   2471 		sc->sc_flags |= WM_F_LOCK_EECD;
   2472 		sc->nvm.acquire = wm_get_eecd;
   2473 		sc->nvm.release = wm_put_eecd;
   2474 		if (reg & EECD_EE_TYPE) {
   2475 			/* SPI */
   2476 			sc->nvm.read = wm_nvm_read_spi;
   2477 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2478 			wm_nvm_set_addrbits_size_eecd(sc);
   2479 		} else {
   2480 			/* Microwire */
   2481 			sc->nvm.read = wm_nvm_read_uwire;
   2482 			if ((reg & EECD_EE_ABITS) != 0) {
   2483 				sc->sc_nvm_wordsize = 256;
   2484 				sc->sc_nvm_addrbits = 8;
   2485 			} else {
   2486 				sc->sc_nvm_wordsize = 64;
   2487 				sc->sc_nvm_addrbits = 6;
   2488 			}
   2489 		}
   2490 		break;
   2491 	case WM_T_82571:
   2492 	case WM_T_82572:
   2493 		/* SPI */
   2494 		sc->nvm.read = wm_nvm_read_eerd;
    2495 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2496 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2497 		wm_nvm_set_addrbits_size_eecd(sc);
   2498 		sc->phy.acquire = wm_get_swsm_semaphore;
   2499 		sc->phy.release = wm_put_swsm_semaphore;
   2500 		sc->nvm.acquire = wm_get_nvm_82571;
   2501 		sc->nvm.release = wm_put_nvm_82571;
   2502 		break;
   2503 	case WM_T_82573:
   2504 	case WM_T_82574:
   2505 	case WM_T_82583:
   2506 		sc->nvm.read = wm_nvm_read_eerd;
    2507 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2508 		if (sc->sc_type == WM_T_82573) {
   2509 			sc->phy.acquire = wm_get_swsm_semaphore;
   2510 			sc->phy.release = wm_put_swsm_semaphore;
   2511 			sc->nvm.acquire = wm_get_nvm_82571;
   2512 			sc->nvm.release = wm_put_nvm_82571;
   2513 		} else {
   2514 			/* Both PHY and NVM use the same semaphore. */
   2515 			sc->phy.acquire = sc->nvm.acquire
   2516 			    = wm_get_swfwhw_semaphore;
   2517 			sc->phy.release = sc->nvm.release
   2518 			    = wm_put_swfwhw_semaphore;
   2519 		}
   2520 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2521 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2522 			sc->sc_nvm_wordsize = 2048;
   2523 		} else {
   2524 			/* SPI */
   2525 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2526 			wm_nvm_set_addrbits_size_eecd(sc);
   2527 		}
   2528 		break;
   2529 	case WM_T_82575:
   2530 	case WM_T_82576:
   2531 	case WM_T_82580:
   2532 	case WM_T_I350:
   2533 	case WM_T_I354:
   2534 	case WM_T_80003:
   2535 		/* SPI */
   2536 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2537 		wm_nvm_set_addrbits_size_eecd(sc);
   2538 		if ((sc->sc_type == WM_T_80003)
   2539 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2540 			sc->nvm.read = wm_nvm_read_eerd;
   2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2542 		} else {
   2543 			sc->nvm.read = wm_nvm_read_spi;
   2544 			sc->sc_flags |= WM_F_LOCK_EECD;
   2545 		}
   2546 		sc->phy.acquire = wm_get_phy_82575;
   2547 		sc->phy.release = wm_put_phy_82575;
   2548 		sc->nvm.acquire = wm_get_nvm_80003;
   2549 		sc->nvm.release = wm_put_nvm_80003;
   2550 		break;
   2551 	case WM_T_ICH8:
   2552 	case WM_T_ICH9:
   2553 	case WM_T_ICH10:
   2554 	case WM_T_PCH:
   2555 	case WM_T_PCH2:
   2556 	case WM_T_PCH_LPT:
   2557 		sc->nvm.read = wm_nvm_read_ich8;
   2558 		/* FLASH */
   2559 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2560 		sc->sc_nvm_wordsize = 2048;
   2561 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2562 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2563 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2564 			aprint_error_dev(sc->sc_dev,
   2565 			    "can't map FLASH registers\n");
   2566 			goto out;
   2567 		}
   2568 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2569 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2570 		    ICH_FLASH_SECTOR_SIZE;
   2571 		sc->sc_ich8_flash_bank_size =
   2572 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2573 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2574 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2575 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
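         		/*
         		 * Worked example (illustrative, assuming 4KB flash
         		 * sectors): GFPREG == 0x00200001 gives base sector 1
         		 * (flash base 4096 bytes) and limit sector 0x20, i.e. a
         		 * 32-sector (128KB) region; split across two banks and
         		 * counted in 16-bit words, that is 32768 words per bank.
         		 */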
   2576 		sc->sc_flashreg_offset = 0;
   2577 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2578 		sc->phy.release = wm_put_swflag_ich8lan;
   2579 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2580 		sc->nvm.release = wm_put_nvm_ich8lan;
   2581 		break;
   2582 	case WM_T_PCH_SPT:
   2583 	case WM_T_PCH_CNP:
   2584 	case WM_T_PCH_TGP:
   2585 		sc->nvm.read = wm_nvm_read_spt;
   2586 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2587 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2588 		sc->sc_flasht = sc->sc_st;
   2589 		sc->sc_flashh = sc->sc_sh;
   2590 		sc->sc_ich8_flash_base = 0;
   2591 		sc->sc_nvm_wordsize =
   2592 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2593 		    * NVM_SIZE_MULTIPLIER;
   2594 		/* It is size in bytes, we want words */
    2595 		/* That is the size in bytes; we want words */
   2596 		/* Assume 2 banks */
   2597 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
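         		/*
         		 * Worked example (illustrative, assuming
         		 * NVM_SIZE_MULTIPLIER is 4096 bytes): a strap field of 7
         		 * yields (7 + 1) * 4096 = 32768 bytes = 16384 words, i.e.
         		 * 8192 words per bank.
         		 */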
   2598 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2599 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2600 		sc->phy.release = wm_put_swflag_ich8lan;
   2601 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2602 		sc->nvm.release = wm_put_nvm_ich8lan;
   2603 		break;
   2604 	case WM_T_I210:
   2605 	case WM_T_I211:
    2606 		/* Allow one clear of the SW semaphore on I210 and newer */
   2607 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2608 		if (wm_nvm_flash_presence_i210(sc)) {
   2609 			sc->nvm.read = wm_nvm_read_eerd;
   2610 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2611 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2612 			wm_nvm_set_addrbits_size_eecd(sc);
   2613 		} else {
   2614 			sc->nvm.read = wm_nvm_read_invm;
   2615 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2616 			sc->sc_nvm_wordsize = INVM_SIZE;
   2617 		}
   2618 		sc->phy.acquire = wm_get_phy_82575;
   2619 		sc->phy.release = wm_put_phy_82575;
   2620 		sc->nvm.acquire = wm_get_nvm_80003;
   2621 		sc->nvm.release = wm_put_nvm_80003;
   2622 		break;
   2623 	default:
   2624 		break;
   2625 	}
   2626 
   2627 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2628 	switch (sc->sc_type) {
   2629 	case WM_T_82571:
   2630 	case WM_T_82572:
   2631 		reg = CSR_READ(sc, WMREG_SWSM2);
   2632 		if ((reg & SWSM2_LOCK) == 0) {
   2633 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2634 			force_clear_smbi = true;
   2635 		} else
   2636 			force_clear_smbi = false;
   2637 		break;
   2638 	case WM_T_82573:
   2639 	case WM_T_82574:
   2640 	case WM_T_82583:
   2641 		force_clear_smbi = true;
   2642 		break;
   2643 	default:
   2644 		force_clear_smbi = false;
   2645 		break;
   2646 	}
   2647 	if (force_clear_smbi) {
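		/* A set SMBI at this point means firmware still holds it. */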
   2648 		reg = CSR_READ(sc, WMREG_SWSM);
   2649 		if ((reg & SWSM_SMBI) != 0)
   2650 			aprint_error_dev(sc->sc_dev,
   2651 			    "Please update the Bootagent\n");
   2652 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2653 	}
   2654 
	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum. This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
   2660 	/*
   2661 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2662 	 * this for later, so we can fail future reads from the EEPROM.
   2663 	 */
   2664 	if (wm_nvm_validate_checksum(sc)) {
		/*
		 * Validate again because some PCIe parts fail the
		 * first check due to the link being in a sleep state.
		 */
   2669 		if (wm_nvm_validate_checksum(sc))
   2670 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2671 	}
   2672 
   2673 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2674 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2675 	else {
   2676 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2677 		    sc->sc_nvm_wordsize);
   2678 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2679 			aprint_verbose("iNVM");
   2680 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2681 			aprint_verbose("FLASH(HW)");
   2682 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2683 			aprint_verbose("FLASH");
   2684 		else {
   2685 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2686 				eetype = "SPI";
   2687 			else
   2688 				eetype = "MicroWire";
   2689 			aprint_verbose("(%d address bits) %s EEPROM",
   2690 			    sc->sc_nvm_addrbits, eetype);
   2691 		}
   2692 	}
   2693 	wm_nvm_version(sc);
   2694 	aprint_verbose("\n");
   2695 
	/*
	 * XXX The first call to wm_gmii_setup_phytype(); the result
	 * might be incorrect at this point.
	 */
   2700 	wm_gmii_setup_phytype(sc, 0, 0);
   2701 
   2702 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2703 	switch (sc->sc_type) {
   2704 	case WM_T_ICH8:
   2705 	case WM_T_ICH9:
   2706 	case WM_T_ICH10:
   2707 	case WM_T_PCH:
   2708 	case WM_T_PCH2:
   2709 	case WM_T_PCH_LPT:
   2710 	case WM_T_PCH_SPT:
   2711 	case WM_T_PCH_CNP:
   2712 	case WM_T_PCH_TGP:
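		/* On ICH/PCH, the APM enable bit lives in WUC, not in NVM. */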
   2713 		apme_mask = WUC_APME;
   2714 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2715 		if ((eeprom_data & apme_mask) != 0)
   2716 			sc->sc_flags |= WM_F_WOL;
   2717 		break;
   2718 	default:
   2719 		break;
   2720 	}
   2721 
   2722 	/* Reset the chip to a known state. */
   2723 	wm_reset(sc);
   2724 
   2725 	/*
   2726 	 * Check for I21[01] PLL workaround.
   2727 	 *
   2728 	 * Three cases:
   2729 	 * a) Chip is I211.
   2730 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2731 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2732 	 */
   2733 	if (sc->sc_type == WM_T_I211)
   2734 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2735 	if (sc->sc_type == WM_T_I210) {
   2736 		if (!wm_nvm_flash_presence_i210(sc))
   2737 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2738 		else if ((sc->sc_nvm_ver_major < 3)
   2739 		    || ((sc->sc_nvm_ver_major == 3)
   2740 			&& (sc->sc_nvm_ver_minor < 25))) {
   2741 			aprint_verbose_dev(sc->sc_dev,
   2742 			    "ROM image version %d.%d is older than 3.25\n",
   2743 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2744 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2745 		}
   2746 	}
   2747 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2748 		wm_pll_workaround_i210(sc);
   2749 
   2750 	wm_get_wakeup(sc);
   2751 
   2752 	/* Non-AMT based hardware can now take control from firmware */
   2753 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2754 		wm_get_hw_control(sc);
   2755 
	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
	 */
   2760 	ea = prop_dictionary_get(dict, "mac-address");
   2761 	if (ea != NULL) {
   2762 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2763 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2764 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2765 	} else {
   2766 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "unable to read Ethernet address\n");
   2769 			goto out;
   2770 		}
   2771 	}
   2772 
   2773 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2774 	    ether_sprintf(enaddr));
   2775 
   2776 	/*
   2777 	 * Read the config info from the EEPROM, and set up various
   2778 	 * bits in the control registers based on their contents.
   2779 	 */
   2780 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2781 	if (pn != NULL) {
   2782 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2783 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2784 	} else {
   2785 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2786 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2787 			goto out;
   2788 		}
   2789 	}
   2790 
   2791 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2792 	if (pn != NULL) {
   2793 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2794 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2795 	} else {
   2796 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2797 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2798 			goto out;
   2799 		}
   2800 	}
   2801 
   2802 	/* check for WM_F_WOL */
   2803 	switch (sc->sc_type) {
   2804 	case WM_T_82542_2_0:
   2805 	case WM_T_82542_2_1:
   2806 	case WM_T_82543:
   2807 		/* dummy? */
   2808 		eeprom_data = 0;
   2809 		apme_mask = NVM_CFG3_APME;
   2810 		break;
   2811 	case WM_T_82544:
   2812 		apme_mask = NVM_CFG2_82544_APM_EN;
   2813 		eeprom_data = cfg2;
   2814 		break;
   2815 	case WM_T_82546:
   2816 	case WM_T_82546_3:
   2817 	case WM_T_82571:
   2818 	case WM_T_82572:
   2819 	case WM_T_82573:
   2820 	case WM_T_82574:
   2821 	case WM_T_82583:
   2822 	case WM_T_80003:
   2823 	case WM_T_82575:
   2824 	case WM_T_82576:
   2825 		apme_mask = NVM_CFG3_APME;
   2826 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2827 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2828 		break;
   2829 	case WM_T_82580:
   2830 	case WM_T_I350:
   2831 	case WM_T_I354:
   2832 	case WM_T_I210:
   2833 	case WM_T_I211:
   2834 		apme_mask = NVM_CFG3_APME;
   2835 		wm_nvm_read(sc,
   2836 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2837 		    1, &eeprom_data);
   2838 		break;
   2839 	case WM_T_ICH8:
   2840 	case WM_T_ICH9:
   2841 	case WM_T_ICH10:
   2842 	case WM_T_PCH:
   2843 	case WM_T_PCH2:
   2844 	case WM_T_PCH_LPT:
   2845 	case WM_T_PCH_SPT:
   2846 	case WM_T_PCH_CNP:
   2847 	case WM_T_PCH_TGP:
		/* Already checked before wm_reset() */
   2849 		apme_mask = eeprom_data = 0;
   2850 		break;
   2851 	default: /* XXX 82540 */
   2852 		apme_mask = NVM_CFG3_APME;
   2853 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2854 		break;
   2855 	}
	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2857 	if ((eeprom_data & apme_mask) != 0)
   2858 		sc->sc_flags |= WM_F_WOL;
   2859 
	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake on LAN on a particular port.
	 */
   2865 	switch (sc->sc_pcidevid) {
   2866 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2867 		sc->sc_flags &= ~WM_F_WOL;
   2868 		break;
   2869 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2870 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2871 		/* Wake events only supported on port A for dual fiber
   2872 		 * regardless of eeprom setting */
   2873 		if (sc->sc_funcid == 1)
   2874 			sc->sc_flags &= ~WM_F_WOL;
   2875 		break;
   2876 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2877 		/* If quad port adapter, disable WoL on all but port A */
   2878 		if (sc->sc_funcid != 0)
   2879 			sc->sc_flags &= ~WM_F_WOL;
   2880 		break;
   2881 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2882 		/* Wake events only supported on port A for dual fiber
   2883 		 * regardless of eeprom setting */
   2884 		if (sc->sc_funcid == 1)
   2885 			sc->sc_flags &= ~WM_F_WOL;
   2886 		break;
   2887 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2888 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2889 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2890 		/* If quad port adapter, disable WoL on all but port A */
   2891 		if (sc->sc_funcid != 0)
   2892 			sc->sc_flags &= ~WM_F_WOL;
   2893 		break;
   2894 	}
   2895 
   2896 	if (sc->sc_type >= WM_T_82575) {
   2897 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2898 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2899 			    nvmword);
   2900 			if ((sc->sc_type == WM_T_82575) ||
   2901 			    (sc->sc_type == WM_T_82576)) {
   2902 				/* Check NVM for autonegotiation */
   2903 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2904 				    != 0)
   2905 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2906 			}
   2907 			if ((sc->sc_type == WM_T_82575) ||
   2908 			    (sc->sc_type == WM_T_I350)) {
   2909 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2910 					sc->sc_flags |= WM_F_MAS;
   2911 			}
   2912 		}
   2913 	}
   2914 
	/*
	 * XXX Need special handling for some multiple-port cards
	 * to disable a particular port.
	 */
   2919 
   2920 	if (sc->sc_type >= WM_T_82544) {
   2921 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2922 		if (pn != NULL) {
   2923 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2924 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2925 		} else {
   2926 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2927 				aprint_error_dev(sc->sc_dev,
   2928 				    "unable to read SWDPIN\n");
   2929 				goto out;
   2930 			}
   2931 		}
   2932 	}
   2933 
   2934 	if (cfg1 & NVM_CFG1_ILOS)
   2935 		sc->sc_ctrl |= CTRL_ILOS;
   2936 
	/*
	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips. Check all datasheets.
	 *
	 * Until this is resolved, only do it for chips up to the 82580.
	 */
   2944 	if (sc->sc_type <= WM_T_82580) {
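		/*
		 * The SWDPIO field gives the software-definable pin
		 * directions; on 82544 and newer the SWDPIN field also
		 * gives their initial values. Both are loaded into CTRL.
		 */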
   2945 		if (sc->sc_type >= WM_T_82544) {
   2946 			sc->sc_ctrl |=
   2947 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2948 			    CTRL_SWDPIO_SHIFT;
   2949 			sc->sc_ctrl |=
   2950 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2951 			    CTRL_SWDPINS_SHIFT;
   2952 		} else {
   2953 			sc->sc_ctrl |=
   2954 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2955 			    CTRL_SWDPIO_SHIFT;
   2956 		}
   2957 	}
   2958 
   2959 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2960 		wm_nvm_read(sc,
   2961 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2962 		    1, &nvmword);
   2963 		if (nvmword & NVM_CFG3_ILOS)
   2964 			sc->sc_ctrl |= CTRL_ILOS;
   2965 	}
   2966 
   2967 #if 0
   2968 	if (sc->sc_type >= WM_T_82544) {
   2969 		if (cfg1 & NVM_CFG1_IPS0)
   2970 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2971 		if (cfg1 & NVM_CFG1_IPS1)
   2972 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2973 		sc->sc_ctrl_ext |=
   2974 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2975 		    CTRL_EXT_SWDPIO_SHIFT;
   2976 		sc->sc_ctrl_ext |=
   2977 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2978 		    CTRL_EXT_SWDPINS_SHIFT;
   2979 	} else {
   2980 		sc->sc_ctrl_ext |=
   2981 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2982 		    CTRL_EXT_SWDPIO_SHIFT;
   2983 	}
   2984 #endif
   2985 
   2986 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2987 #if 0
   2988 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2989 #endif
   2990 
   2991 	if (sc->sc_type == WM_T_PCH) {
   2992 		uint16_t val;
   2993 
   2994 		/* Save the NVM K1 bit setting */
   2995 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2996 
   2997 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2998 			sc->sc_nvm_k1_enabled = 1;
   2999 		else
   3000 			sc->sc_nvm_k1_enabled = 0;
   3001 	}
   3002 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   3004 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   3005 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   3006 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   3007 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   3008 	    || sc->sc_type == WM_T_PCH_TGP
   3009 	    || sc->sc_type == WM_T_82573
   3010 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   3011 		/* Copper only */
   3012 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   3016 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3017 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
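		/*
		 * The link-mode field selects internal PHY (GMII),
		 * 1000BASE-KX, SGMII or SerDes; for SGMII and SerDes an
		 * SFP module, if present, decides the final media type.
		 */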
   3018 		switch (link_mode) {
   3019 		case CTRL_EXT_LINK_MODE_1000KX:
   3020 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   3021 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3022 			break;
   3023 		case CTRL_EXT_LINK_MODE_SGMII:
   3024 			if (wm_sgmii_uses_mdio(sc)) {
   3025 				aprint_normal_dev(sc->sc_dev,
   3026 				    "SGMII(MDIO)\n");
   3027 				sc->sc_flags |= WM_F_SGMII;
   3028 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3029 				break;
   3030 			}
   3031 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   3032 			/*FALLTHROUGH*/
   3033 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   3034 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   3035 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   3036 				if (link_mode
   3037 				    == CTRL_EXT_LINK_MODE_SGMII) {
   3038 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3039 					sc->sc_flags |= WM_F_SGMII;
   3040 					aprint_verbose_dev(sc->sc_dev,
   3041 					    "SGMII\n");
   3042 				} else {
   3043 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3044 					aprint_verbose_dev(sc->sc_dev,
   3045 					    "SERDES\n");
   3046 				}
   3047 				break;
   3048 			}
   3049 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3050 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3051 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3052 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3053 				sc->sc_flags |= WM_F_SGMII;
   3054 			}
   3055 			/* Do not change link mode for 100BaseFX */
   3056 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3057 				break;
   3058 
   3059 			/* Change current link mode setting */
   3060 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3061 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3062 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3063 			else
   3064 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3065 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3066 			break;
   3067 		case CTRL_EXT_LINK_MODE_GMII:
   3068 		default:
   3069 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3070 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3071 			break;
   3072 		}
   3073 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   3079 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3080 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3081 			if (!wm_sgmii_uses_mdio(sc))
   3082 				wm_gmii_setup_phytype(sc, 0, 0);
   3083 			wm_reset_mdicnfg_82580(sc);
   3084 		}
   3085 	} else if (sc->sc_type < WM_T_82543 ||
   3086 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3087 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3088 			aprint_error_dev(sc->sc_dev,
   3089 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3090 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3091 		}
   3092 	} else {
   3093 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3094 			aprint_error_dev(sc->sc_dev,
   3095 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3096 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3097 		}
   3098 	}
   3099 
   3100 	if (sc->sc_type >= WM_T_PCH2)
   3101 		sc->sc_flags |= WM_F_EEE;
   3102 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3103 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3104 		/* XXX: Need special handling for I354. (not yet) */
   3105 		if (sc->sc_type != WM_T_I354)
   3106 			sc->sc_flags |= WM_F_EEE;
   3107 	}
   3108 
   3109 	/*
   3110 	 * The I350 has a bug where it always strips the CRC whether
   3111 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3112 	 */
   3113 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3114 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3115 		sc->sc_flags |= WM_F_CRC_STRIP;
   3116 
	/*
	 * Workaround for some chips to delay sending LINK_STATE_UP.
	 * Some systems can't send packets right after link-up. See also
	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
	 */
   3122 	switch (sc->sc_type) {
   3123 	case WM_T_I350:
   3124 	case WM_T_I354:
   3125 	case WM_T_I210:
   3126 	case WM_T_I211:
   3127 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3128 			sc->sc_flags |= WM_F_DELAY_LINKUP;
   3129 		break;
   3130 	default:
   3131 		break;
   3132 	}
   3133 
   3134 	/* Set device properties (macflags) */
   3135 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3136 
   3137 	if (sc->sc_flags != 0) {
   3138 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3139 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3140 	}
   3141 
   3142 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3143 
   3144 	/* Initialize the media structures accordingly. */
   3145 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3146 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3147 	else
   3148 		wm_tbi_mediainit(sc); /* All others */
   3149 
   3150 	ifp = &sc->sc_ethercom.ec_if;
   3151 	xname = device_xname(sc->sc_dev);
   3152 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3153 	ifp->if_softc = sc;
   3154 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3155 	ifp->if_extflags = IFEF_MPSAFE;
   3156 	ifp->if_ioctl = wm_ioctl;
   3157 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3158 		ifp->if_start = wm_nq_start;
		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
		 */
   3167 		if (wm_is_using_multiqueue(sc))
   3168 			ifp->if_transmit = wm_nq_transmit;
   3169 	} else {
   3170 		ifp->if_start = wm_start;
   3171 		/*
   3172 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3173 		 * described above.
   3174 		 */
   3175 		if (wm_is_using_multiqueue(sc))
   3176 			ifp->if_transmit = wm_transmit;
   3177 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick() is the watchdog. */
   3179 	ifp->if_init = wm_init;
   3180 	ifp->if_stop = wm_stop;
   3181 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3182 	IFQ_SET_READY(&ifp->if_snd);
   3183 
   3184 	/* Check for jumbo frame */
   3185 	switch (sc->sc_type) {
   3186 	case WM_T_82573:
   3187 		/* XXX limited to 9234 if ASPM is disabled */
   3188 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3189 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3190 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3191 		break;
   3192 	case WM_T_82571:
   3193 	case WM_T_82572:
   3194 	case WM_T_82574:
   3195 	case WM_T_82583:
   3196 	case WM_T_82575:
   3197 	case WM_T_82576:
   3198 	case WM_T_82580:
   3199 	case WM_T_I350:
   3200 	case WM_T_I354:
   3201 	case WM_T_I210:
   3202 	case WM_T_I211:
   3203 	case WM_T_80003:
   3204 	case WM_T_ICH9:
   3205 	case WM_T_ICH10:
   3206 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3207 	case WM_T_PCH_LPT:
   3208 	case WM_T_PCH_SPT:
   3209 	case WM_T_PCH_CNP:
   3210 	case WM_T_PCH_TGP:
   3211 		/* XXX limited to 9234 */
   3212 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3213 		break;
   3214 	case WM_T_PCH:
   3215 		/* XXX limited to 4096 */
   3216 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3217 		break;
   3218 	case WM_T_82542_2_0:
   3219 	case WM_T_82542_2_1:
   3220 	case WM_T_ICH8:
   3221 		/* No support for jumbo frame */
   3222 		break;
   3223 	default:
   3224 		/* ETHER_MAX_LEN_JUMBO */
   3225 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3226 		break;
   3227 	}
   3228 
	/* If we're an i82543 or greater, we can support VLANs. */
   3230 	if (sc->sc_type >= WM_T_82543) {
   3231 		sc->sc_ethercom.ec_capabilities |=
   3232 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3233 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3234 	}
   3235 
   3236 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3237 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3238 
	/*
	 * We can perform TCPv4 and UDPv4 checksum offload, but
	 * only on i82543 and later.
	 */
   3243 	if (sc->sc_type >= WM_T_82543) {
   3244 		ifp->if_capabilities |=
   3245 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3246 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3247 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3248 		    IFCAP_CSUM_TCPv6_Tx |
   3249 		    IFCAP_CSUM_UDPv6_Tx;
   3250 	}
   3251 
   3252 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3254 	 *
   3255 	 *	82541GI (8086:1076) ... no
   3256 	 *	82572EI (8086:10b9) ... yes
   3257 	 */
   3258 	if (sc->sc_type >= WM_T_82571) {
   3259 		ifp->if_capabilities |=
   3260 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3261 	}
   3262 
   3263 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   3265 	 * TCP segmentation offload.
   3266 	 */
   3267 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3268 		ifp->if_capabilities |= IFCAP_TSOv4;
   3269 
   3270 	if (sc->sc_type >= WM_T_82571)
   3271 		ifp->if_capabilities |= IFCAP_TSOv6;
   3272 
   3273 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3274 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3275 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3276 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3277 
   3278 	/* Attach the interface. */
   3279 	if_initialize(ifp);
   3280 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3281 	ether_ifattach(ifp, enaddr);
   3282 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3283 	if_register(ifp);
   3284 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3285 	    RND_FLAG_DEFAULT);
   3286 
   3287 #ifdef WM_EVENT_COUNTERS
   3288 	/* Attach event counters. */
   3289 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3290 	    NULL, xname, "linkintr");
   3291 
   3292 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "CRC Error");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Symbol Error");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Missed Packets");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Collision");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Sequence Error");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Receive Length Error");
   3304 
   3305 	if (sc->sc_type >= WM_T_82543) {
   3306 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3307 		    NULL, xname, "Alignment Error");
   3308 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3309 		    NULL, xname, "Receive Error");
   3310 		/* XXX Does 82575 have HTDPMC? */
   3311 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3312 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3313 			    EVCNT_TYPE_MISC, NULL, xname,
   3314 			    "Carrier Extension Error");
   3315 		else
   3316 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3317 			    EVCNT_TYPE_MISC, NULL, xname,
   3318 			    "Host Transmit Discarded Packets by MAC");
   3319 
   3320 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3321 		    NULL, xname, "Tx with No CRS");
   3322 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3323 		    NULL, xname, "TCP Segmentation Context Tx");
   3324 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3325 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3326 			    EVCNT_TYPE_MISC, NULL, xname,
   3327 			    "TCP Segmentation Context Tx Fail");
   3328 		else {
   3329 			/* XXX Is the circuit breaker only for 82576? */
   3330 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3331 			    EVCNT_TYPE_MISC, NULL, xname,
   3332 			    "Circuit Breaker Rx Dropped Packet");
   3333 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3334 			    EVCNT_TYPE_MISC, NULL, xname,
   3335 			    "Circuit Breaker Rx Manageability Packet");
   3336 		}
   3337 	}
   3338 
   3339 	if (sc->sc_type >= WM_T_82542_2_1) {
   3340 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3341 		    NULL, xname, "XOFF Transmitted");
   3342 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3343 		    NULL, xname, "XON Transmitted");
   3344 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3345 		    NULL, xname, "XOFF Received");
   3346 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3347 		    NULL, xname, "XON Received");
   3348 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3349 		    NULL, xname, "FC Received Unsupported");
   3350 	}
   3351 
   3352 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3353 	    NULL, xname, "Single Collision");
   3354 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3355 	    NULL, xname, "Excessive Collisions");
   3356 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3357 	    NULL, xname, "Multiple Collision");
   3358 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3359 	    NULL, xname, "Late Collisions");
   3360 
   3361 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3362 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3363 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3364 
   3365 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Defer");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Packets Rx (64 bytes)");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3370 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3371 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3372 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3373 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3374 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3375 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3376 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3377 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3378 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3379 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3380 	    NULL, xname, "Good Packets Rx");
   3381 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3382 	    NULL, xname, "Broadcast Packets Rx");
   3383 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3384 	    NULL, xname, "Multicast Packets Rx");
   3385 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3386 	    NULL, xname, "Good Packets Tx");
   3387 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3388 	    NULL, xname, "Good Octets Rx");
   3389 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3390 	    NULL, xname, "Good Octets Tx");
   3391 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3392 	    NULL, xname, "Rx No Buffers");
   3393 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3394 	    NULL, xname, "Rx Undersize (valid CRC)");
   3395 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3396 	    NULL, xname, "Rx Fragment (bad CRC)");
   3397 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3398 	    NULL, xname, "Rx Oversize (valid CRC)");
   3399 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3400 	    NULL, xname, "Rx Jabber (bad CRC)");
   3401 	if (sc->sc_type >= WM_T_82540) {
   3402 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3403 		    NULL, xname, "Management Packets RX");
   3404 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3405 		    NULL, xname, "Management Packets Dropped");
   3406 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3407 		    NULL, xname, "Management Packets TX");
   3408 	}
   3409 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3410 	    NULL, xname, "Total Octets Rx");
   3411 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3412 	    NULL, xname, "Total Octets Tx");
   3413 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3414 	    NULL, xname, "Total Packets Rx");
   3415 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3416 	    NULL, xname, "Total Packets Tx");
   3417 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3418 	    NULL, xname, "Packets Tx (64 bytes)");
   3419 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3420 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3421 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3422 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3423 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3424 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3425 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3426 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3427 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3428 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3429 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3430 	    NULL, xname, "Multicast Packets Tx");
   3431 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3432 	    NULL, xname, "Broadcast Packets Tx");
   3433 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
   3434 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3435 		    NULL, xname, "Interrupt Assertion");
   3436 	if (sc->sc_type < WM_T_82575) {
   3437 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3438 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3439 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3440 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3441 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3442 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3443 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
   3444 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3445 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3446 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3447 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3448 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3449 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3450 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3451 
   3452 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3453 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3454 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3455 	} else if (!WM_IS_ICHPCH(sc)) {
   3456 		/*
   3457 		 * For 82575 and newer.
   3458 		 *
   3459 		 * On 80003, ICHs and PCHs, it seems all of the following
   3460 		 * registers are zero.
   3461 		 */
   3462 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3463 		    NULL, xname, "Rx Packets To Host");
   3464 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3465 		    NULL, xname, "Debug Counter 1");
   3466 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3467 		    NULL, xname, "Debug Counter 2");
   3468 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3469 		    NULL, xname, "Debug Counter 3");
   3470 
		/*
		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
		 * Empty). I think it's wrong. The real count I observed is
		 * the same as GPTC (Good Packets Tx) and TPT (Total Packets
		 * Tx). It's HGPTC (Host Good Packets Tx), which is described
		 * in the 82576 datasheet.
		 */
   3478 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3479 		    NULL, xname, "Host Good Packets TX");
   3480 
   3481 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3482 		    NULL, xname, "Debug Counter 4");
   3483 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3484 		    NULL, xname, "Rx Desc Min Thresh");
   3485 		/* XXX Is the circuit breaker only for 82576? */
   3486 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3487 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3488 
   3489 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3490 		    NULL, xname, "Host Good Octets Rx");
   3491 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3492 		    NULL, xname, "Host Good Octets Tx");
   3493 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3494 		    NULL, xname, "Length Errors (length/type <= 1500)");
   3495 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3496 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3497 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3498 		    NULL, xname, "Header Redirection Missed Packet");
   3499 	}
   3500 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3501 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3502 		    NULL, xname, "EEE Tx LPI");
   3503 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3504 		    NULL, xname, "EEE Rx LPI");
   3505 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3506 		    NULL, xname, "BMC2OS Packets received by host");
   3507 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3508 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3509 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3510 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3511 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3512 		    NULL, xname, "OS2BMC Packets received by BMC");
   3513 	}
   3514 #endif /* WM_EVENT_COUNTERS */
   3515 
   3516 	sc->sc_txrx_use_workqueue = false;
   3517 
   3518 	if (wm_phy_need_linkdown_discard(sc)) {
   3519 		DPRINTF(sc, WM_DEBUG_LINK,
   3520 		    ("%s: %s: Set linkdown discard flag\n",
   3521 			device_xname(sc->sc_dev), __func__));
   3522 		wm_set_linkdown_discard(sc);
   3523 	}
   3524 
   3525 	wm_init_sysctls(sc);
   3526 
   3527 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3528 		pmf_class_network_register(self, ifp);
   3529 	else
   3530 		aprint_error_dev(self, "couldn't establish power handler\n");
   3531 
   3532 	sc->sc_flags |= WM_F_ATTACHED;
   3533 out:
   3534 	return;
   3535 }
   3536 
   3537 /* The detach function (ca_detach) */
   3538 static int
   3539 wm_detach(device_t self, int flags __unused)
   3540 {
   3541 	struct wm_softc *sc = device_private(self);
   3542 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3543 	int i;
   3544 
   3545 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3546 		return 0;
   3547 
	/* Stop the interface; callouts are stopped inside wm_stop(). */
   3549 	IFNET_LOCK(ifp);
   3550 	sc->sc_dying = true;
   3551 	wm_stop(ifp, 1);
   3552 	IFNET_UNLOCK(ifp);
   3553 
   3554 	pmf_device_deregister(self);
   3555 
   3556 	sysctl_teardown(&sc->sc_sysctllog);
   3557 
   3558 #ifdef WM_EVENT_COUNTERS
   3559 	evcnt_detach(&sc->sc_ev_linkintr);
   3560 
   3561 	evcnt_detach(&sc->sc_ev_crcerrs);
   3562 	evcnt_detach(&sc->sc_ev_symerrc);
   3563 	evcnt_detach(&sc->sc_ev_mpc);
   3564 	evcnt_detach(&sc->sc_ev_colc);
   3565 	evcnt_detach(&sc->sc_ev_sec);
   3566 	evcnt_detach(&sc->sc_ev_rlec);
   3567 
   3568 	if (sc->sc_type >= WM_T_82543) {
   3569 		evcnt_detach(&sc->sc_ev_algnerrc);
   3570 		evcnt_detach(&sc->sc_ev_rxerrc);
   3571 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3572 			evcnt_detach(&sc->sc_ev_cexterr);
   3573 		else
   3574 			evcnt_detach(&sc->sc_ev_htdpmc);
   3575 
   3576 		evcnt_detach(&sc->sc_ev_tncrs);
   3577 		evcnt_detach(&sc->sc_ev_tsctc);
   3578 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3579 			evcnt_detach(&sc->sc_ev_tsctfc);
   3580 		else {
   3581 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3582 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3583 		}
   3584 	}
   3585 
   3586 	if (sc->sc_type >= WM_T_82542_2_1) {
   3587 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3588 		evcnt_detach(&sc->sc_ev_tx_xon);
   3589 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3590 		evcnt_detach(&sc->sc_ev_rx_xon);
   3591 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3592 	}
   3593 
   3594 	evcnt_detach(&sc->sc_ev_scc);
   3595 	evcnt_detach(&sc->sc_ev_ecol);
   3596 	evcnt_detach(&sc->sc_ev_mcc);
   3597 	evcnt_detach(&sc->sc_ev_latecol);
   3598 
   3599 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3600 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3601 
   3602 	evcnt_detach(&sc->sc_ev_dc);
   3603 	evcnt_detach(&sc->sc_ev_prc64);
   3604 	evcnt_detach(&sc->sc_ev_prc127);
   3605 	evcnt_detach(&sc->sc_ev_prc255);
   3606 	evcnt_detach(&sc->sc_ev_prc511);
   3607 	evcnt_detach(&sc->sc_ev_prc1023);
   3608 	evcnt_detach(&sc->sc_ev_prc1522);
   3609 	evcnt_detach(&sc->sc_ev_gprc);
   3610 	evcnt_detach(&sc->sc_ev_bprc);
   3611 	evcnt_detach(&sc->sc_ev_mprc);
   3612 	evcnt_detach(&sc->sc_ev_gptc);
   3613 	evcnt_detach(&sc->sc_ev_gorc);
   3614 	evcnt_detach(&sc->sc_ev_gotc);
   3615 	evcnt_detach(&sc->sc_ev_rnbc);
   3616 	evcnt_detach(&sc->sc_ev_ruc);
   3617 	evcnt_detach(&sc->sc_ev_rfc);
   3618 	evcnt_detach(&sc->sc_ev_roc);
   3619 	evcnt_detach(&sc->sc_ev_rjc);
   3620 	if (sc->sc_type >= WM_T_82540) {
   3621 		evcnt_detach(&sc->sc_ev_mgtprc);
   3622 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3623 		evcnt_detach(&sc->sc_ev_mgtptc);
   3624 	}
   3625 	evcnt_detach(&sc->sc_ev_tor);
   3626 	evcnt_detach(&sc->sc_ev_tot);
   3627 	evcnt_detach(&sc->sc_ev_tpr);
   3628 	evcnt_detach(&sc->sc_ev_tpt);
   3629 	evcnt_detach(&sc->sc_ev_ptc64);
   3630 	evcnt_detach(&sc->sc_ev_ptc127);
   3631 	evcnt_detach(&sc->sc_ev_ptc255);
   3632 	evcnt_detach(&sc->sc_ev_ptc511);
   3633 	evcnt_detach(&sc->sc_ev_ptc1023);
   3634 	evcnt_detach(&sc->sc_ev_ptc1522);
   3635 	evcnt_detach(&sc->sc_ev_mptc);
   3636 	evcnt_detach(&sc->sc_ev_bptc);
   3637 	if (sc->sc_type >= WM_T_82571)
   3638 		evcnt_detach(&sc->sc_ev_iac);
   3639 	if (sc->sc_type < WM_T_82575) {
   3640 		evcnt_detach(&sc->sc_ev_icrxptc);
   3641 		evcnt_detach(&sc->sc_ev_icrxatc);
   3642 		evcnt_detach(&sc->sc_ev_ictxptc);
   3643 		evcnt_detach(&sc->sc_ev_ictxatc);
   3644 		evcnt_detach(&sc->sc_ev_ictxqec);
   3645 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3646 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3647 		evcnt_detach(&sc->sc_ev_icrxoc);
   3648 	} else if (!WM_IS_ICHPCH(sc)) {
   3649 		evcnt_detach(&sc->sc_ev_rpthc);
   3650 		evcnt_detach(&sc->sc_ev_debug1);
   3651 		evcnt_detach(&sc->sc_ev_debug2);
   3652 		evcnt_detach(&sc->sc_ev_debug3);
   3653 		evcnt_detach(&sc->sc_ev_hgptc);
   3654 		evcnt_detach(&sc->sc_ev_debug4);
   3655 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3656 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3657 
   3658 		evcnt_detach(&sc->sc_ev_hgorc);
   3659 		evcnt_detach(&sc->sc_ev_hgotc);
   3660 		evcnt_detach(&sc->sc_ev_lenerrs);
   3661 		evcnt_detach(&sc->sc_ev_scvpc);
   3662 		evcnt_detach(&sc->sc_ev_hrmpc);
   3663 	}
   3664 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3665 		evcnt_detach(&sc->sc_ev_tlpic);
   3666 		evcnt_detach(&sc->sc_ev_rlpic);
   3667 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3668 		evcnt_detach(&sc->sc_ev_o2bspc);
   3669 		evcnt_detach(&sc->sc_ev_b2ospc);
   3670 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3671 	}
   3672 #endif /* WM_EVENT_COUNTERS */
   3673 
   3674 	rnd_detach_source(&sc->rnd_source);
   3675 
   3676 	/* Tell the firmware about the release */
   3677 	mutex_enter(sc->sc_core_lock);
   3678 	wm_release_manageability(sc);
   3679 	wm_release_hw_control(sc);
   3680 	wm_enable_wakeup(sc);
   3681 	mutex_exit(sc->sc_core_lock);
   3682 
   3683 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3684 
   3685 	ether_ifdetach(ifp);
   3686 	if_detach(ifp);
   3687 	if_percpuq_destroy(sc->sc_ipq);
   3688 
   3689 	/* Delete all remaining media. */
   3690 	ifmedia_fini(&sc->sc_mii.mii_media);
   3691 
   3692 	/* Unload RX dmamaps and free mbufs */
   3693 	for (i = 0; i < sc->sc_nqueues; i++) {
   3694 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3695 		mutex_enter(rxq->rxq_lock);
   3696 		wm_rxdrain(rxq);
   3697 		mutex_exit(rxq->rxq_lock);
   3698 	}
   3699 	/* Must unlock here */
   3700 
	/* Disestablish the interrupt handlers */
   3702 	for (i = 0; i < sc->sc_nintrs; i++) {
   3703 		if (sc->sc_ihs[i] != NULL) {
   3704 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3705 			sc->sc_ihs[i] = NULL;
   3706 		}
   3707 	}
   3708 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3709 
   3710 	/* wm_stop() ensured that the workqueues are stopped. */
   3711 	workqueue_destroy(sc->sc_queue_wq);
   3712 	workqueue_destroy(sc->sc_reset_wq);
   3713 
   3714 	for (i = 0; i < sc->sc_nqueues; i++)
   3715 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3716 
   3717 	wm_free_txrx_queues(sc);
   3718 
   3719 	/* Unmap the registers */
   3720 	if (sc->sc_ss) {
   3721 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3722 		sc->sc_ss = 0;
   3723 	}
   3724 	if (sc->sc_ios) {
   3725 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3726 		sc->sc_ios = 0;
   3727 	}
   3728 	if (sc->sc_flashs) {
   3729 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3730 		sc->sc_flashs = 0;
   3731 	}
   3732 
   3733 	if (sc->sc_core_lock)
   3734 		mutex_obj_free(sc->sc_core_lock);
   3735 	if (sc->sc_ich_phymtx)
   3736 		mutex_obj_free(sc->sc_ich_phymtx);
   3737 	if (sc->sc_ich_nvmmtx)
   3738 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3739 
   3740 	return 0;
   3741 }
   3742 
   3743 static bool
   3744 wm_suspend(device_t self, const pmf_qual_t *qual)
   3745 {
   3746 	struct wm_softc *sc = device_private(self);
   3747 
   3748 	wm_release_manageability(sc);
   3749 	wm_release_hw_control(sc);
   3750 	wm_enable_wakeup(sc);
   3751 
   3752 	return true;
   3753 }
   3754 
   3755 static bool
   3756 wm_resume(device_t self, const pmf_qual_t *qual)
   3757 {
   3758 	struct wm_softc *sc = device_private(self);
   3759 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3760 	pcireg_t reg;
   3761 	char buf[256];
   3762 
   3763 	reg = CSR_READ(sc, WMREG_WUS);
   3764 	if (reg != 0) {
   3765 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3766 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3767 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3768 	}
   3769 
   3770 	if (sc->sc_type >= WM_T_PCH2)
   3771 		wm_resume_workarounds_pchlan(sc);
   3772 	IFNET_LOCK(ifp);
   3773 	if ((ifp->if_flags & IFF_UP) == 0) {
   3774 		/* >= PCH_SPT hardware workaround before reset. */
   3775 		if (sc->sc_type >= WM_T_PCH_SPT)
   3776 			wm_flush_desc_rings(sc);
   3777 
   3778 		wm_reset(sc);
   3779 		/* Non-AMT based hardware can now take control from firmware */
   3780 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3781 			wm_get_hw_control(sc);
   3782 		wm_init_manageability(sc);
   3783 	} else {
   3784 		/*
   3785 		 * We called pmf_class_network_register(), so if_init() is
		 * automatically called when IFF_UP is set. wm_reset(),
   3787 		 * wm_get_hw_control() and wm_init_manageability() are called
   3788 		 * via wm_init().
   3789 		 */
   3790 	}
   3791 	IFNET_UNLOCK(ifp);
   3792 
   3793 	return true;
   3794 }
   3795 
/*
 * wm_watchdog:
 *
 *	Watchdog checker. Returns true if the tick callout should be
 *	rescheduled, false when a Tx hang was detected and a reset is
 *	pending on the reset workqueue.
 */
   3801 static bool
   3802 wm_watchdog(struct ifnet *ifp)
   3803 {
   3804 	int qid;
   3805 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576). */
   3807 
   3808 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3809 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3810 
   3811 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3812 	}
   3813 
   3814 #ifdef WM_DEBUG
   3815 	if (sc->sc_trigger_reset) {
   3816 		/* debug operation, no need for atomicity or reliability */
   3817 		sc->sc_trigger_reset = 0;
   3818 		hang_queue++;
   3819 	}
   3820 #endif
   3821 
   3822 	if (hang_queue == 0)
   3823 		return true;
   3824 
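	/* Schedule at most one reset work until it completes. */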
   3825 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3826 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3827 
   3828 	return false;
   3829 }
   3830 
   3831 /*
   3832  * Perform an interface watchdog reset.
   3833  */
   3834 static void
   3835 wm_handle_reset_work(struct work *work, void *arg)
   3836 {
   3837 	struct wm_softc * const sc = arg;
   3838 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3839 
   3840 	/* Don't want ioctl operations to happen */
   3841 	IFNET_LOCK(ifp);
   3842 
	/* Reset the interface. */
   3844 	wm_init(ifp);
   3845 
   3846 	IFNET_UNLOCK(ifp);
   3847 
	/*
	 * Some upper-layer processing still calls ifp->if_start()
	 * directly, e.g. ALTQ or single-CPU systems.
	 */
   3852 	/* Try to get more packets going. */
   3853 	ifp->if_start(ifp);
   3854 
   3855 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3856 }
   3857 
   3858 
   3859 static void
   3860 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3861 {
   3862 
   3863 	mutex_enter(txq->txq_lock);
   3864 	if (txq->txq_sending &&
   3865 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3866 		wm_watchdog_txq_locked(ifp, txq, hang);
   3867 
   3868 	mutex_exit(txq->txq_lock);
   3869 }
   3870 
   3871 static void
   3872 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3873     uint16_t *hang)
   3874 {
   3875 	struct wm_softc *sc = ifp->if_softc;
   3876 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3877 
   3878 	KASSERT(mutex_owned(txq->txq_lock));
   3879 
   3880 	/*
   3881 	 * Since we're using delayed interrupts, sweep up
   3882 	 * before we report an error.
   3883 	 */
   3884 	wm_txeof(txq, UINT_MAX);
   3885 
   3886 	if (txq->txq_sending)
   3887 		*hang |= __BIT(wmq->wmq_id);
   3888 
   3889 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3890 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3891 		    device_xname(sc->sc_dev));
   3892 	} else {
   3893 #ifdef WM_DEBUG
   3894 		int i, j;
   3895 		struct wm_txsoft *txs;
   3896 #endif
   3897 		log(LOG_ERR,
   3898 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3899 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3900 		    txq->txq_next);
   3901 		if_statinc(ifp, if_oerrors);
   3902 #ifdef WM_DEBUG
   3903 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3904 		     i = WM_NEXTTXS(txq, i)) {
   3905 			txs = &txq->txq_soft[i];
   3906 			printf("txs %d tx %d -> %d\n",
   3907 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3908 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3909 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3910 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3911 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3912 					printf("\t %#08x%08x\n",
   3913 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3914 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3915 				} else {
   3916 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3917 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3918 					    txq->txq_descs[j].wtx_addr.wa_low);
   3919 					printf("\t %#04x%02x%02x%08x\n",
   3920 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3921 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3922 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3923 					    txq->txq_descs[j].wtx_cmdlen);
   3924 				}
   3925 				if (j == txs->txs_lastdesc)
   3926 					break;
   3927 			}
   3928 		}
   3929 #endif
   3930 	}
   3931 }
   3932 
   3933 /*
   3934  * wm_tick:
   3935  *
   3936  *	One second timer, used to check link status, sweep up
   3937  *	completed transmit jobs, etc.
   3938  */
   3939 static void
   3940 wm_tick(void *arg)
   3941 {
   3942 	struct wm_softc *sc = arg;
   3943 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3944 
   3945 	mutex_enter(sc->sc_core_lock);
   3946 
   3947 	if (sc->sc_core_stopping) {
   3948 		mutex_exit(sc->sc_core_lock);
   3949 		return;
   3950 	}
   3951 
   3952 	wm_update_stats(sc);
   3953 
   3954 	if (sc->sc_flags & WM_F_HAS_MII) {
   3955 		bool dotick = true;
   3956 
   3957 		/*
   3958 		 * Workaround for some chips to delay sending LINK_STATE_UP.
   3959 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
   3960 		 */
   3961 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   3962 			struct timeval now;
   3963 
   3964 			getmicrotime(&now);
   3965 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   3966 				dotick = false;
   3967 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   3968 				/* Simplify by checking tv_sec only. */
   3969 
   3970 				sc->sc_linkup_delay_time.tv_sec = 0;
   3971 				sc->sc_linkup_delay_time.tv_usec = 0;
   3972 			}
   3973 		}
   3974 		if (dotick)
   3975 			mii_tick(&sc->sc_mii);
   3976 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3977 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3978 		wm_serdes_tick(sc);
   3979 	else
   3980 		wm_tbi_tick(sc);
   3981 
   3982 	mutex_exit(sc->sc_core_lock);
   3983 
   3984 	if (wm_watchdog(ifp))
   3985 		callout_schedule(&sc->sc_tick_ch, hz);
   3986 }
   3987 
   3988 static int
   3989 wm_ifflags_cb(struct ethercom *ec)
   3990 {
   3991 	struct ifnet *ifp = &ec->ec_if;
   3992 	struct wm_softc *sc = ifp->if_softc;
   3993 	u_short iffchange;
   3994 	int ecchange;
   3995 	bool needreset = false;
   3996 	int rc = 0;
   3997 
   3998 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3999 		device_xname(sc->sc_dev), __func__));
   4000 
   4001 	KASSERT(IFNET_LOCKED(ifp));
   4002 
   4003 	mutex_enter(sc->sc_core_lock);
   4004 
   4005 	/*
   4006 	 * Check for if_flags.
   4007 	 * Main usage is to prevent linkdown when opening bpf.
   4008 	 */
   4009 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   4010 	sc->sc_if_flags = ifp->if_flags;
   4011 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   4012 		needreset = true;
   4013 		goto ec;
   4014 	}
   4015 
   4016 	/* iff related updates */
   4017 	if ((iffchange & IFF_PROMISC) != 0)
   4018 		wm_set_filter(sc);
   4019 
   4020 	wm_set_vlan(sc);
   4021 
   4022 ec:
   4023 	/* Check for ec_capenable. */
   4024 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   4025 	sc->sc_ec_capenable = ec->ec_capenable;
   4026 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   4027 		needreset = true;
   4028 		goto out;
   4029 	}
   4030 
   4031 	/* ec related updates */
   4032 	wm_set_eee(sc);
   4033 
   4034 out:
   4035 	if (needreset)
   4036 		rc = ENETRESET;
   4037 	mutex_exit(sc->sc_core_lock);
   4038 
   4039 	return rc;
   4040 }
   4041 
   4042 static bool
   4043 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4044 {
   4045 
   4046 	switch (sc->sc_phytype) {
   4047 	case WMPHY_82577: /* ihphy */
   4048 	case WMPHY_82578: /* atphy */
   4049 	case WMPHY_82579: /* ihphy */
   4050 	case WMPHY_I217: /* ihphy */
   4051 	case WMPHY_82580: /* ihphy */
   4052 	case WMPHY_I350: /* ihphy */
   4053 		return true;
   4054 	default:
   4055 		return false;
   4056 	}
   4057 }
   4058 
   4059 static void
   4060 wm_set_linkdown_discard(struct wm_softc *sc)
   4061 {
   4062 
   4063 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4064 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4065 
   4066 		mutex_enter(txq->txq_lock);
   4067 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4068 		mutex_exit(txq->txq_lock);
   4069 	}
   4070 }
   4071 
   4072 static void
   4073 wm_clear_linkdown_discard(struct wm_softc *sc)
   4074 {
   4075 
   4076 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4077 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4078 
   4079 		mutex_enter(txq->txq_lock);
   4080 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4081 		mutex_exit(txq->txq_lock);
   4082 	}
   4083 }
   4084 
   4085 /*
   4086  * wm_ioctl:		[ifnet interface function]
   4087  *
   4088  *	Handle control requests from the operator.
   4089  */
   4090 static int
   4091 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4092 {
   4093 	struct wm_softc *sc = ifp->if_softc;
   4094 	struct ifreq *ifr = (struct ifreq *)data;
   4095 	struct ifaddr *ifa = (struct ifaddr *)data;
   4096 	struct sockaddr_dl *sdl;
   4097 	int error;
   4098 
   4099 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4100 		device_xname(sc->sc_dev), __func__));
   4101 
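	/* Multicast add/delete requests may arrive without IFNET_LOCK. */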
   4102 	switch (cmd) {
   4103 	case SIOCADDMULTI:
   4104 	case SIOCDELMULTI:
   4105 		break;
   4106 	default:
   4107 		KASSERT(IFNET_LOCKED(ifp));
   4108 	}
   4109 
   4110 	if (cmd == SIOCZIFDATA) {
   4111 		/*
   4112 		 * Special handling for SIOCZIFDATA.
   4113 		 * Copying and clearing the if_data structure is done with
   4114 		 * ether_ioctl() below.
   4115 		 */
   4116 		mutex_enter(sc->sc_core_lock);
   4117 		wm_update_stats(sc);
   4118 		wm_clear_evcnt(sc);
   4119 		mutex_exit(sc->sc_core_lock);
   4120 	}
   4121 
   4122 	switch (cmd) {
   4123 	case SIOCSIFMEDIA:
   4124 		mutex_enter(sc->sc_core_lock);
   4125 		/* Flow control requires full-duplex mode. */
   4126 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4127 		    (ifr->ifr_media & IFM_FDX) == 0)
   4128 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4129 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4130 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4131 				/* We can do both TXPAUSE and RXPAUSE. */
   4132 				ifr->ifr_media |=
   4133 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4134 			}
   4135 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4136 		}
   4137 		mutex_exit(sc->sc_core_lock);
   4138 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4139 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4140 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4141 				DPRINTF(sc, WM_DEBUG_LINK,
   4142 				    ("%s: %s: Set linkdown discard flag\n",
   4143 					device_xname(sc->sc_dev), __func__));
   4144 				wm_set_linkdown_discard(sc);
   4145 			}
   4146 		}
   4147 		break;
   4148 	case SIOCINITIFADDR:
   4149 		mutex_enter(sc->sc_core_lock);
   4150 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4151 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4152 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4153 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4154 			/* Unicast address is the first multicast entry */
   4155 			wm_set_filter(sc);
   4156 			error = 0;
   4157 			mutex_exit(sc->sc_core_lock);
   4158 			break;
   4159 		}
   4160 		mutex_exit(sc->sc_core_lock);
   4161 		/*FALLTHROUGH*/
   4162 	default:
   4163 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4164 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4165 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4166 				DPRINTF(sc, WM_DEBUG_LINK,
   4167 				    ("%s: %s: Set linkdown discard flag\n",
   4168 					device_xname(sc->sc_dev), __func__));
   4169 				wm_set_linkdown_discard(sc);
   4170 			}
   4171 		}
   4172 		const int s = splnet();
   4173 		/* It may call wm_start, so unlock here */
   4174 		error = ether_ioctl(ifp, cmd, data);
   4175 		splx(s);
   4176 		if (error != ENETRESET)
   4177 			break;
   4178 
   4179 		error = 0;
   4180 
   4181 		if (cmd == SIOCSIFCAP)
   4182 			error = if_init(ifp);
   4183 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4184 			mutex_enter(sc->sc_core_lock);
   4185 			if (sc->sc_if_flags & IFF_RUNNING) {
   4186 				/*
   4187 				 * Multicast list has changed; set the
   4188 				 * hardware filter accordingly.
   4189 				 */
   4190 				wm_set_filter(sc);
   4191 			}
   4192 			mutex_exit(sc->sc_core_lock);
   4193 		}
   4194 		break;
   4195 	}
   4196 
   4197 	return error;
   4198 }
   4199 
   4200 /* MAC address related */
   4201 
   4202 /*
    4203  * Get the offset of the MAC address and return it.
    4204  * If an error occurs, fall back to offset 0.
   4205  */
   4206 static uint16_t
   4207 wm_check_alt_mac_addr(struct wm_softc *sc)
   4208 {
   4209 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4210 	uint16_t offset = NVM_OFF_MACADDR;
   4211 
   4212 	/* Try to read alternative MAC address pointer */
   4213 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4214 		return 0;
   4215 
    4216 	/* Check whether the pointer is valid. */
   4217 	if ((offset == 0x0000) || (offset == 0xffff))
   4218 		return 0;
   4219 
   4220 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4221 	/*
    4222 	 * Check whether the alternative MAC address is valid.
    4223 	 * Some cards have a pointer other than 0xffff but don't actually
    4224 	 * use an alternative MAC address.
    4225 	 *
    4226 	 * Reject the entry if its broadcast/multicast bit is set.
   4227 	 */
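         	/*
         	 * For illustration (values hypothetical): the low byte of the
         	 * first NVM word holds MAC octet 0.  A word 0x1b00 has low
         	 * byte 0x00, whose multicast bit (bit 0) is clear, so the
         	 * address is accepted; an erased word 0xffff has the bit set
         	 * and is rejected.
         	 */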
   4228 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4229 		if (((myea[0] & 0xff) & 0x01) == 0)
   4230 			return offset; /* Found */
   4231 
   4232 	/* Not found */
   4233 	return 0;
   4234 }
   4235 
   4236 static int
   4237 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4238 {
   4239 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4240 	uint16_t offset = NVM_OFF_MACADDR;
   4241 	int do_invert = 0;
   4242 
   4243 	switch (sc->sc_type) {
   4244 	case WM_T_82580:
   4245 	case WM_T_I350:
   4246 	case WM_T_I354:
   4247 		/* EEPROM Top Level Partitioning */
   4248 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4249 		break;
   4250 	case WM_T_82571:
   4251 	case WM_T_82575:
   4252 	case WM_T_82576:
   4253 	case WM_T_80003:
   4254 	case WM_T_I210:
   4255 	case WM_T_I211:
   4256 		offset = wm_check_alt_mac_addr(sc);
   4257 		if (offset == 0)
   4258 			if ((sc->sc_funcid & 0x01) == 1)
   4259 				do_invert = 1;
   4260 		break;
   4261 	default:
   4262 		if ((sc->sc_funcid & 0x01) == 1)
   4263 			do_invert = 1;
   4264 		break;
   4265 	}
   4266 
   4267 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4268 		goto bad;
   4269 
   4270 	enaddr[0] = myea[0] & 0xff;
   4271 	enaddr[1] = myea[0] >> 8;
   4272 	enaddr[2] = myea[1] & 0xff;
   4273 	enaddr[3] = myea[1] >> 8;
   4274 	enaddr[4] = myea[2] & 0xff;
   4275 	enaddr[5] = myea[2] >> 8;
   4276 
   4277 	/*
   4278 	 * Toggle the LSB of the MAC address on the second port
   4279 	 * of some dual port cards.
   4280 	 */
   4281 	if (do_invert != 0)
   4282 		enaddr[5] ^= 1;
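
         	/*
         	 * Example (hypothetical address): NVM words { 0x1b00, 0xaa21,
         	 * 0xccbb } unpack to 00:1b:21:aa:bb:cc; with do_invert set,
         	 * the second function would use 00:1b:21:aa:bb:cd.
         	 */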
   4283 
   4284 	return 0;
   4285 
   4286 bad:
   4287 	return -1;
   4288 }
   4289 
   4290 /*
   4291  * wm_set_ral:
   4292  *
    4293  *	Set an entry in the receive address list.
   4294  */
   4295 static void
   4296 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4297 {
   4298 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4299 	uint32_t wlock_mac;
   4300 	int rv;
   4301 
   4302 	if (enaddr != NULL) {
   4303 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4304 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4305 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4306 		ral_hi |= RAL_AV;
   4307 	} else {
   4308 		ral_lo = 0;
   4309 		ral_hi = 0;
   4310 	}
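
         	/*
         	 * Packing example (hypothetical address): 00:1b:21:aa:bb:cc
         	 * yields ral_lo = 0xaa211b00 and ral_hi = 0x0000ccbb | RAL_AV.
         	 */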
   4311 
   4312 	switch (sc->sc_type) {
   4313 	case WM_T_82542_2_0:
   4314 	case WM_T_82542_2_1:
   4315 	case WM_T_82543:
   4316 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4317 		CSR_WRITE_FLUSH(sc);
   4318 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4319 		CSR_WRITE_FLUSH(sc);
   4320 		break;
   4321 	case WM_T_PCH2:
   4322 	case WM_T_PCH_LPT:
   4323 	case WM_T_PCH_SPT:
   4324 	case WM_T_PCH_CNP:
   4325 	case WM_T_PCH_TGP:
   4326 		if (idx == 0) {
   4327 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4328 			CSR_WRITE_FLUSH(sc);
   4329 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4330 			CSR_WRITE_FLUSH(sc);
   4331 			return;
   4332 		}
   4333 		if (sc->sc_type != WM_T_PCH2) {
   4334 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4335 			    FWSM_WLOCK_MAC);
   4336 			addrl = WMREG_SHRAL(idx - 1);
   4337 			addrh = WMREG_SHRAH(idx - 1);
   4338 		} else {
   4339 			wlock_mac = 0;
   4340 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4341 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4342 		}
   4343 
   4344 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4345 			rv = wm_get_swflag_ich8lan(sc);
   4346 			if (rv != 0)
   4347 				return;
   4348 			CSR_WRITE(sc, addrl, ral_lo);
   4349 			CSR_WRITE_FLUSH(sc);
   4350 			CSR_WRITE(sc, addrh, ral_hi);
   4351 			CSR_WRITE_FLUSH(sc);
   4352 			wm_put_swflag_ich8lan(sc);
   4353 		}
   4354 
   4355 		break;
   4356 	default:
   4357 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4358 		CSR_WRITE_FLUSH(sc);
   4359 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4360 		CSR_WRITE_FLUSH(sc);
   4361 		break;
   4362 	}
   4363 }
   4364 
   4365 /*
   4366  * wm_mchash:
   4367  *
    4368  *	Compute the hash of the multicast address for the 4096-bit
    4369  *	multicast filter (1024-bit on ICH/PCH variants).
   4370  */
   4371 static uint32_t
   4372 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4373 {
   4374 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4375 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4376 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4377 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4378 	uint32_t hash;
   4379 
   4380 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4381 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4382 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4383 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4384 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   4385 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4386 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4387 		return (hash & 0x3ff);
   4388 	}
   4389 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4390 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4391 
   4392 	return (hash & 0xfff);
   4393 }
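
         /*
          * Worked example for wm_mchash() (hypothetical, sc_mchash_type 0,
          * non-ICH): for 01:00:5e:00:00:01, enaddr[4] = 0x00 and
          * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
          * The caller then sets bit (hash & 0x1f) = 16 in MTA word
          * (hash >> 5) = 0.
          */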
   4394 
    4395 /*
    4396  * wm_rar_count:
    4397  *	Return the number of receive address list entries for this chip.
    4398  */
   4399 static int
   4400 wm_rar_count(struct wm_softc *sc)
   4401 {
   4402 	int size;
   4403 
   4404 	switch (sc->sc_type) {
   4405 	case WM_T_ICH8:
    4406 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4407 		break;
   4408 	case WM_T_ICH9:
   4409 	case WM_T_ICH10:
   4410 	case WM_T_PCH:
   4411 		size = WM_RAL_TABSIZE_ICH8;
   4412 		break;
   4413 	case WM_T_PCH2:
   4414 		size = WM_RAL_TABSIZE_PCH2;
   4415 		break;
   4416 	case WM_T_PCH_LPT:
   4417 	case WM_T_PCH_SPT:
   4418 	case WM_T_PCH_CNP:
   4419 	case WM_T_PCH_TGP:
   4420 		size = WM_RAL_TABSIZE_PCH_LPT;
   4421 		break;
   4422 	case WM_T_82575:
   4423 	case WM_T_I210:
   4424 	case WM_T_I211:
   4425 		size = WM_RAL_TABSIZE_82575;
   4426 		break;
   4427 	case WM_T_82576:
   4428 	case WM_T_82580:
   4429 		size = WM_RAL_TABSIZE_82576;
   4430 		break;
   4431 	case WM_T_I350:
   4432 	case WM_T_I354:
   4433 		size = WM_RAL_TABSIZE_I350;
   4434 		break;
   4435 	default:
   4436 		size = WM_RAL_TABSIZE;
   4437 	}
   4438 
   4439 	return size;
   4440 }
   4441 
   4442 /*
   4443  * wm_set_filter:
   4444  *
   4445  *	Set up the receive filter.
   4446  */
   4447 static void
   4448 wm_set_filter(struct wm_softc *sc)
   4449 {
   4450 	struct ethercom *ec = &sc->sc_ethercom;
   4451 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4452 	struct ether_multi *enm;
   4453 	struct ether_multistep step;
   4454 	bus_addr_t mta_reg;
   4455 	uint32_t hash, reg, bit;
   4456 	int i, size, ralmax, rv;
   4457 
   4458 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4459 		device_xname(sc->sc_dev), __func__));
   4460 	KASSERT(mutex_owned(sc->sc_core_lock));
   4461 
   4462 	if (sc->sc_type >= WM_T_82544)
   4463 		mta_reg = WMREG_CORDOVA_MTA;
   4464 	else
   4465 		mta_reg = WMREG_MTA;
   4466 
   4467 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4468 
   4469 	if (sc->sc_if_flags & IFF_BROADCAST)
   4470 		sc->sc_rctl |= RCTL_BAM;
   4471 	if (sc->sc_if_flags & IFF_PROMISC) {
   4472 		sc->sc_rctl |= RCTL_UPE;
   4473 		ETHER_LOCK(ec);
   4474 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4475 		ETHER_UNLOCK(ec);
   4476 		goto allmulti;
   4477 	}
   4478 
   4479 	/*
   4480 	 * Set the station address in the first RAL slot, and
   4481 	 * clear the remaining slots.
   4482 	 */
   4483 	size = wm_rar_count(sc);
   4484 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4485 
   4486 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   4487 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   4488 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4489 		switch (i) {
   4490 		case 0:
   4491 			/* We can use all entries */
   4492 			ralmax = size;
   4493 			break;
   4494 		case 1:
   4495 			/* Only RAR[0] */
   4496 			ralmax = 1;
   4497 			break;
   4498 		default:
   4499 			/* Available SHRA + RAR[0] */
   4500 			ralmax = i + 1;
   4501 		}
   4502 	} else
   4503 		ralmax = size;
   4504 	for (i = 1; i < size; i++) {
   4505 		if (i < ralmax)
   4506 			wm_set_ral(sc, NULL, i);
   4507 	}
   4508 
   4509 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4510 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4511 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4512 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   4513 	    || (sc->sc_type == WM_T_PCH_TGP))
   4514 		size = WM_ICH8_MC_TABSIZE;
   4515 	else
   4516 		size = WM_MC_TABSIZE;
   4517 	/* Clear out the multicast table. */
   4518 	for (i = 0; i < size; i++) {
   4519 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4520 		CSR_WRITE_FLUSH(sc);
   4521 	}
   4522 
   4523 	ETHER_LOCK(ec);
   4524 	ETHER_FIRST_MULTI(step, ec, enm);
   4525 	while (enm != NULL) {
   4526 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4527 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4528 			ETHER_UNLOCK(ec);
   4529 			/*
   4530 			 * We must listen to a range of multicast addresses.
   4531 			 * For now, just accept all multicasts, rather than
   4532 			 * trying to set only those filter bits needed to match
   4533 			 * the range.  (At this time, the only use of address
   4534 			 * ranges is for IP multicast routing, for which the
   4535 			 * range is big enough to require all bits set.)
   4536 			 */
   4537 			goto allmulti;
   4538 		}
   4539 
   4540 		hash = wm_mchash(sc, enm->enm_addrlo);
   4541 
   4542 		reg = (hash >> 5);
   4543 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4544 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4545 		    || (sc->sc_type == WM_T_PCH2)
   4546 		    || (sc->sc_type == WM_T_PCH_LPT)
   4547 		    || (sc->sc_type == WM_T_PCH_SPT)
   4548 		    || (sc->sc_type == WM_T_PCH_CNP)
   4549 		    || (sc->sc_type == WM_T_PCH_TGP))
   4550 			reg &= 0x1f;
   4551 		else
   4552 			reg &= 0x7f;
   4553 		bit = hash & 0x1f;
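         		/*
         		 * hash[4:0] selects the bit and the remaining hash
         		 * bits select the 32-bit MTA word, as computed above.
         		 */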
   4554 
   4555 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4556 		hash |= 1U << bit;
   4557 
   4558 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4559 			/*
    4560 			 * 82544 Errata 9: Certain registers cannot be
    4561 			 * written with particular alignments in PCI-X bus
    4562 			 * operation (FCAH, MTA and VFTA).
   4563 			 */
   4564 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4565 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4566 			CSR_WRITE_FLUSH(sc);
   4567 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4568 			CSR_WRITE_FLUSH(sc);
   4569 		} else {
   4570 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4571 			CSR_WRITE_FLUSH(sc);
   4572 		}
   4573 
   4574 		ETHER_NEXT_MULTI(step, enm);
   4575 	}
   4576 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4577 	ETHER_UNLOCK(ec);
   4578 
   4579 	goto setit;
   4580 
   4581 allmulti:
   4582 	sc->sc_rctl |= RCTL_MPE;
   4583 
   4584 setit:
   4585 	if (sc->sc_type >= WM_T_PCH2) {
   4586 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4587 		    && (ifp->if_mtu > ETHERMTU))
   4588 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4589 		else
   4590 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4591 		if (rv != 0)
   4592 			device_printf(sc->sc_dev,
   4593 			    "Failed to do workaround for jumbo frame.\n");
   4594 	}
   4595 
   4596 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4597 }
   4598 
   4599 /* Reset and init related */
   4600 
   4601 static void
   4602 wm_set_vlan(struct wm_softc *sc)
   4603 {
   4604 
   4605 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4606 		device_xname(sc->sc_dev), __func__));
   4607 
   4608 	/* Deal with VLAN enables. */
   4609 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4610 		sc->sc_ctrl |= CTRL_VME;
   4611 	else
   4612 		sc->sc_ctrl &= ~CTRL_VME;
   4613 
   4614 	/* Write the control registers. */
   4615 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4616 }
   4617 
   4618 static void
   4619 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4620 {
   4621 	uint32_t gcr;
   4622 	pcireg_t ctrl2;
   4623 
   4624 	gcr = CSR_READ(sc, WMREG_GCR);
   4625 
   4626 	/* Only take action if timeout value is defaulted to 0 */
   4627 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4628 		goto out;
   4629 
   4630 	if ((gcr & GCR_CAP_VER2) == 0) {
   4631 		gcr |= GCR_CMPL_TMOUT_10MS;
   4632 		goto out;
   4633 	}
   4634 
   4635 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4636 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4637 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4638 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4639 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4640 
   4641 out:
   4642 	/* Disable completion timeout resend */
   4643 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4644 
   4645 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4646 }
   4647 
   4648 void
   4649 wm_get_auto_rd_done(struct wm_softc *sc)
   4650 {
   4651 	int i;
   4652 
   4653 	/* wait for eeprom to reload */
   4654 	switch (sc->sc_type) {
   4655 	case WM_T_82571:
   4656 	case WM_T_82572:
   4657 	case WM_T_82573:
   4658 	case WM_T_82574:
   4659 	case WM_T_82583:
   4660 	case WM_T_82575:
   4661 	case WM_T_82576:
   4662 	case WM_T_82580:
   4663 	case WM_T_I350:
   4664 	case WM_T_I354:
   4665 	case WM_T_I210:
   4666 	case WM_T_I211:
   4667 	case WM_T_80003:
   4668 	case WM_T_ICH8:
   4669 	case WM_T_ICH9:
   4670 		for (i = 0; i < 10; i++) {
   4671 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4672 				break;
   4673 			delay(1000);
   4674 		}
   4675 		if (i == 10) {
   4676 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4677 			    "complete\n", device_xname(sc->sc_dev));
   4678 		}
   4679 		break;
   4680 	default:
   4681 		break;
   4682 	}
   4683 }
   4684 
   4685 void
   4686 wm_lan_init_done(struct wm_softc *sc)
   4687 {
   4688 	uint32_t reg = 0;
   4689 	int i;
   4690 
   4691 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4692 		device_xname(sc->sc_dev), __func__));
   4693 
   4694 	/* Wait for eeprom to reload */
   4695 	switch (sc->sc_type) {
   4696 	case WM_T_ICH10:
   4697 	case WM_T_PCH:
   4698 	case WM_T_PCH2:
   4699 	case WM_T_PCH_LPT:
   4700 	case WM_T_PCH_SPT:
   4701 	case WM_T_PCH_CNP:
   4702 	case WM_T_PCH_TGP:
   4703 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4704 			reg = CSR_READ(sc, WMREG_STATUS);
   4705 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4706 				break;
   4707 			delay(100);
   4708 		}
   4709 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4710 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4711 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4712 		}
   4713 		break;
   4714 	default:
   4715 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4716 		    __func__);
   4717 		break;
   4718 	}
   4719 
   4720 	reg &= ~STATUS_LAN_INIT_DONE;
   4721 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4722 }
   4723 
   4724 void
   4725 wm_get_cfg_done(struct wm_softc *sc)
   4726 {
   4727 	int mask;
   4728 	uint32_t reg;
   4729 	int i;
   4730 
   4731 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4732 		device_xname(sc->sc_dev), __func__));
   4733 
   4734 	/* Wait for eeprom to reload */
   4735 	switch (sc->sc_type) {
   4736 	case WM_T_82542_2_0:
   4737 	case WM_T_82542_2_1:
   4738 		/* null */
   4739 		break;
   4740 	case WM_T_82543:
   4741 	case WM_T_82544:
   4742 	case WM_T_82540:
   4743 	case WM_T_82545:
   4744 	case WM_T_82545_3:
   4745 	case WM_T_82546:
   4746 	case WM_T_82546_3:
   4747 	case WM_T_82541:
   4748 	case WM_T_82541_2:
   4749 	case WM_T_82547:
   4750 	case WM_T_82547_2:
   4751 	case WM_T_82573:
   4752 	case WM_T_82574:
   4753 	case WM_T_82583:
   4754 		/* generic */
   4755 		delay(10*1000);
   4756 		break;
   4757 	case WM_T_80003:
   4758 	case WM_T_82571:
   4759 	case WM_T_82572:
   4760 	case WM_T_82575:
   4761 	case WM_T_82576:
   4762 	case WM_T_82580:
   4763 	case WM_T_I350:
   4764 	case WM_T_I354:
   4765 	case WM_T_I210:
   4766 	case WM_T_I211:
   4767 		if (sc->sc_type == WM_T_82571) {
    4768 			/* On the 82571, all ports share CFGDONE_0 */
   4769 			mask = EEMNGCTL_CFGDONE_0;
   4770 		} else
   4771 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4772 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4773 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4774 				break;
   4775 			delay(1000);
   4776 		}
   4777 		if (i >= WM_PHY_CFG_TIMEOUT)
   4778 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4779 				device_xname(sc->sc_dev), __func__));
   4780 		break;
   4781 	case WM_T_ICH8:
   4782 	case WM_T_ICH9:
   4783 	case WM_T_ICH10:
   4784 	case WM_T_PCH:
   4785 	case WM_T_PCH2:
   4786 	case WM_T_PCH_LPT:
   4787 	case WM_T_PCH_SPT:
   4788 	case WM_T_PCH_CNP:
   4789 	case WM_T_PCH_TGP:
   4790 		delay(10*1000);
   4791 		if (sc->sc_type >= WM_T_ICH10)
   4792 			wm_lan_init_done(sc);
   4793 		else
   4794 			wm_get_auto_rd_done(sc);
   4795 
   4796 		/* Clear PHY Reset Asserted bit */
   4797 		reg = CSR_READ(sc, WMREG_STATUS);
   4798 		if ((reg & STATUS_PHYRA) != 0)
   4799 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4800 		break;
   4801 	default:
   4802 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4803 		    __func__);
   4804 		break;
   4805 	}
   4806 }
   4807 
   4808 int
   4809 wm_phy_post_reset(struct wm_softc *sc)
   4810 {
   4811 	device_t dev = sc->sc_dev;
   4812 	uint16_t reg;
   4813 	int rv = 0;
   4814 
   4815 	/* This function is only for ICH8 and newer. */
   4816 	if (sc->sc_type < WM_T_ICH8)
   4817 		return 0;
   4818 
   4819 	if (wm_phy_resetisblocked(sc)) {
   4820 		/* XXX */
   4821 		device_printf(dev, "PHY is blocked\n");
   4822 		return -1;
   4823 	}
   4824 
   4825 	/* Allow time for h/w to get to quiescent state after reset */
   4826 	delay(10*1000);
   4827 
   4828 	/* Perform any necessary post-reset workarounds */
   4829 	if (sc->sc_type == WM_T_PCH)
   4830 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4831 	else if (sc->sc_type == WM_T_PCH2)
   4832 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4833 	if (rv != 0)
   4834 		return rv;
   4835 
   4836 	/* Clear the host wakeup bit after lcd reset */
   4837 	if (sc->sc_type >= WM_T_PCH) {
   4838 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4839 		reg &= ~BM_WUC_HOST_WU_BIT;
   4840 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4841 	}
   4842 
   4843 	/* Configure the LCD with the extended configuration region in NVM */
   4844 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4845 		return rv;
   4846 
   4847 	/* Configure the LCD with the OEM bits in NVM */
   4848 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4849 
   4850 	if (sc->sc_type == WM_T_PCH2) {
   4851 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4852 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4853 			delay(10 * 1000);
   4854 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4855 		}
   4856 		/* Set EEE LPI Update Timer to 200usec */
   4857 		rv = sc->phy.acquire(sc);
   4858 		if (rv)
   4859 			return rv;
   4860 		rv = wm_write_emi_reg_locked(dev,
   4861 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4862 		sc->phy.release(sc);
   4863 	}
   4864 
   4865 	return rv;
   4866 }
   4867 
   4868 /* Only for PCH and newer */
   4869 static int
   4870 wm_write_smbus_addr(struct wm_softc *sc)
   4871 {
   4872 	uint32_t strap, freq;
   4873 	uint16_t phy_data;
   4874 	int rv;
   4875 
   4876 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4877 		device_xname(sc->sc_dev), __func__));
   4878 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4879 
   4880 	strap = CSR_READ(sc, WMREG_STRAP);
   4881 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4882 
   4883 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4884 	if (rv != 0)
   4885 		return rv;
   4886 
   4887 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4888 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4889 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4890 
   4891 	if (sc->sc_phytype == WMPHY_I217) {
   4892 		/* Restore SMBus frequency */
    4893 		if (freq--) {
   4894 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4895 			    | HV_SMB_ADDR_FREQ_HIGH);
   4896 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4897 			    HV_SMB_ADDR_FREQ_LOW);
   4898 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4899 			    HV_SMB_ADDR_FREQ_HIGH);
   4900 		} else
   4901 			DPRINTF(sc, WM_DEBUG_INIT,
   4902 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4903 				device_xname(sc->sc_dev), __func__));
   4904 	}
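
         	/*
         	 * Illustrative strap decoding: a STRAP_FREQ field of 2 gives
         	 * freq = 1 after the decrement, i.e. FREQ_LOW = 1 and
         	 * FREQ_HIGH = 0.
         	 */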
   4905 
   4906 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4907 	    phy_data);
   4908 }
   4909 
   4910 static int
   4911 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4912 {
   4913 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4914 	uint16_t phy_page = 0;
   4915 	int rv = 0;
   4916 
   4917 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4918 		device_xname(sc->sc_dev), __func__));
   4919 
   4920 	switch (sc->sc_type) {
   4921 	case WM_T_ICH8:
   4922 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4923 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4924 			return 0;
   4925 
   4926 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4927 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4928 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4929 			break;
   4930 		}
   4931 		/* FALLTHROUGH */
   4932 	case WM_T_PCH:
   4933 	case WM_T_PCH2:
   4934 	case WM_T_PCH_LPT:
   4935 	case WM_T_PCH_SPT:
   4936 	case WM_T_PCH_CNP:
   4937 	case WM_T_PCH_TGP:
   4938 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4939 		break;
   4940 	default:
   4941 		return 0;
   4942 	}
   4943 
   4944 	if ((rv = sc->phy.acquire(sc)) != 0)
   4945 		return rv;
   4946 
   4947 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4948 	if ((reg & sw_cfg_mask) == 0)
   4949 		goto release;
   4950 
   4951 	/*
   4952 	 * Make sure HW does not configure LCD from PHY extended configuration
   4953 	 * before SW configuration
   4954 	 */
   4955 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4956 	if ((sc->sc_type < WM_T_PCH2)
   4957 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4958 		goto release;
   4959 
   4960 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4961 		device_xname(sc->sc_dev), __func__));
    4962 	/* The extended config pointer is in DWORDs; convert to words */
   4963 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4964 
   4965 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4966 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4967 	if (cnf_size == 0)
   4968 		goto release;
   4969 
   4970 	if (((sc->sc_type == WM_T_PCH)
   4971 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4972 	    || (sc->sc_type > WM_T_PCH)) {
   4973 		/*
   4974 		 * HW configures the SMBus address and LEDs when the OEM and
   4975 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4976 		 * are cleared, SW will configure them instead.
   4977 		 */
   4978 		DPRINTF(sc, WM_DEBUG_INIT,
   4979 		    ("%s: %s: Configure SMBus and LED\n",
   4980 			device_xname(sc->sc_dev), __func__));
   4981 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4982 			goto release;
   4983 
   4984 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4985 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4986 		    (uint16_t)reg);
   4987 		if (rv != 0)
   4988 			goto release;
   4989 	}
   4990 
   4991 	/* Configure LCD from extended configuration region. */
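         	/*
         	 * Each entry is a (data, address) word pair; a pair whose
         	 * address is IGPHY_PAGE_SELECT changes the page OR'd into
         	 * later addresses.  E.g. (values illustrative) a page-select
         	 * pair with data 0x0040 makes a later address word 0x12 write
         	 * to PHY register (0x12 | 0x0040).
         	 */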
   4992 	for (i = 0; i < cnf_size; i++) {
   4993 		uint16_t reg_data, reg_addr;
   4994 
   4995 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4996 			goto release;
   4997 
    4998 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4999 			goto release;
   5000 
   5001 		if (reg_addr == IGPHY_PAGE_SELECT)
   5002 			phy_page = reg_data;
   5003 
   5004 		reg_addr &= IGPHY_MAXREGADDR;
   5005 		reg_addr |= phy_page;
   5006 
   5007 		KASSERT(sc->phy.writereg_locked != NULL);
   5008 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   5009 		    reg_data);
   5010 	}
   5011 
   5012 release:
   5013 	sc->phy.release(sc);
   5014 	return rv;
   5015 }
   5016 
   5017 /*
   5018  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   5019  *  @sc:       pointer to the HW structure
    5020  *  @d0_state: true when entering D0, false when entering D3
   5021  *
   5022  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   5023  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   5024  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
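          *
          *  A typical call is wm_oem_bits_config_ich8lan(sc, true) on the
          *  way back to D0 (see wm_phy_post_reset()).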
   5025  */
   5026 int
   5027 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   5028 {
   5029 	uint32_t mac_reg;
   5030 	uint16_t oem_reg;
   5031 	int rv;
   5032 
   5033 	if (sc->sc_type < WM_T_PCH)
   5034 		return 0;
   5035 
   5036 	rv = sc->phy.acquire(sc);
   5037 	if (rv != 0)
   5038 		return rv;
   5039 
   5040 	if (sc->sc_type == WM_T_PCH) {
   5041 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5042 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5043 			goto release;
   5044 	}
   5045 
   5046 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5047 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5048 		goto release;
   5049 
   5050 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5051 
   5052 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5053 	if (rv != 0)
   5054 		goto release;
   5055 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5056 
   5057 	if (d0_state) {
   5058 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5059 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5060 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5061 			oem_reg |= HV_OEM_BITS_LPLU;
   5062 	} else {
   5063 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5064 		    != 0)
   5065 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5066 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5067 		    != 0)
   5068 			oem_reg |= HV_OEM_BITS_LPLU;
   5069 	}
   5070 
   5071 	/* Set Restart auto-neg to activate the bits */
   5072 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5073 	    && (wm_phy_resetisblocked(sc) == false))
   5074 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5075 
   5076 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5077 
   5078 release:
   5079 	sc->phy.release(sc);
   5080 
   5081 	return rv;
   5082 }
   5083 
   5084 /* Init hardware bits */
   5085 void
   5086 wm_initialize_hardware_bits(struct wm_softc *sc)
   5087 {
   5088 	uint32_t tarc0, tarc1, reg;
   5089 
   5090 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5091 		device_xname(sc->sc_dev), __func__));
   5092 
   5093 	/* For 82571 variant, 80003 and ICHs */
   5094 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5095 	    || WM_IS_ICHPCH(sc)) {
   5096 
   5097 		/* Transmit Descriptor Control 0 */
   5098 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5099 		reg |= TXDCTL_COUNT_DESC;
   5100 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5101 
   5102 		/* Transmit Descriptor Control 1 */
   5103 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5104 		reg |= TXDCTL_COUNT_DESC;
   5105 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5106 
   5107 		/* TARC0 */
   5108 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5109 		switch (sc->sc_type) {
   5110 		case WM_T_82571:
   5111 		case WM_T_82572:
   5112 		case WM_T_82573:
   5113 		case WM_T_82574:
   5114 		case WM_T_82583:
   5115 		case WM_T_80003:
   5116 			/* Clear bits 30..27 */
   5117 			tarc0 &= ~__BITS(30, 27);
   5118 			break;
   5119 		default:
   5120 			break;
   5121 		}
   5122 
   5123 		switch (sc->sc_type) {
   5124 		case WM_T_82571:
   5125 		case WM_T_82572:
   5126 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5127 
   5128 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5129 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5130 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5131 			/* 8257[12] Errata No.7 */
    5132 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5133 
   5134 			/* TARC1 bit 28 */
   5135 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5136 				tarc1 &= ~__BIT(28);
   5137 			else
   5138 				tarc1 |= __BIT(28);
   5139 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5140 
   5141 			/*
   5142 			 * 8257[12] Errata No.13
    5143 			 * Disable Dynamic Clock Gating.
   5144 			 */
   5145 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5146 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5147 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5148 			break;
   5149 		case WM_T_82573:
   5150 		case WM_T_82574:
   5151 		case WM_T_82583:
   5152 			if ((sc->sc_type == WM_T_82574)
   5153 			    || (sc->sc_type == WM_T_82583))
   5154 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5155 
   5156 			/* Extended Device Control */
   5157 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5158 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5159 			reg |= __BIT(22);	/* Set bit 22 */
   5160 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5161 
   5162 			/* Device Control */
   5163 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5164 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5165 
   5166 			/* PCIe Control Register */
   5167 			/*
   5168 			 * 82573 Errata (unknown).
   5169 			 *
   5170 			 * 82574 Errata 25 and 82583 Errata 12
   5171 			 * "Dropped Rx Packets":
    5172 			 *   NVM image 2.1.4 and newer doesn't have this bug.
   5173 			 */
   5174 			reg = CSR_READ(sc, WMREG_GCR);
   5175 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5176 			CSR_WRITE(sc, WMREG_GCR, reg);
   5177 
   5178 			if ((sc->sc_type == WM_T_82574)
   5179 			    || (sc->sc_type == WM_T_82583)) {
   5180 				/*
   5181 				 * Document says this bit must be set for
   5182 				 * proper operation.
   5183 				 */
   5184 				reg = CSR_READ(sc, WMREG_GCR);
   5185 				reg |= __BIT(22);
   5186 				CSR_WRITE(sc, WMREG_GCR, reg);
   5187 
   5188 				/*
    5189 				 * Apply a workaround for a hardware erratum
    5190 				 * documented in the errata docs. It fixes an
    5191 				 * issue where error-prone or unreliable PCIe
    5192 				 * completions occur, particularly with ASPM
    5193 				 * enabled. Without the fix, the issue can
    5194 				 * cause Tx timeouts.
   5195 				 */
   5196 				reg = CSR_READ(sc, WMREG_GCR2);
   5197 				reg |= __BIT(0);
   5198 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5199 			}
   5200 			break;
   5201 		case WM_T_80003:
   5202 			/* TARC0 */
   5203 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5204 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5205 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5206 
   5207 			/* TARC1 bit 28 */
   5208 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5209 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5210 				tarc1 &= ~__BIT(28);
   5211 			else
   5212 				tarc1 |= __BIT(28);
   5213 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5214 			break;
   5215 		case WM_T_ICH8:
   5216 		case WM_T_ICH9:
   5217 		case WM_T_ICH10:
   5218 		case WM_T_PCH:
   5219 		case WM_T_PCH2:
   5220 		case WM_T_PCH_LPT:
   5221 		case WM_T_PCH_SPT:
   5222 		case WM_T_PCH_CNP:
   5223 		case WM_T_PCH_TGP:
   5224 			/* TARC0 */
   5225 			if (sc->sc_type == WM_T_ICH8) {
   5226 				/* Set TARC0 bits 29 and 28 */
   5227 				tarc0 |= __BITS(29, 28);
   5228 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5229 				tarc0 |= __BIT(29);
   5230 				/*
   5231 				 *  Drop bit 28. From Linux.
   5232 				 * See I218/I219 spec update
   5233 				 * "5. Buffer Overrun While the I219 is
   5234 				 * Processing DMA Transactions"
   5235 				 */
   5236 				tarc0 &= ~__BIT(28);
   5237 			}
   5238 			/* Set TARC0 bits 23,24,26,27 */
   5239 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5240 
   5241 			/* CTRL_EXT */
   5242 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5243 			reg |= __BIT(22);	/* Set bit 22 */
   5244 			/*
   5245 			 * Enable PHY low-power state when MAC is at D3
   5246 			 * w/o WoL
   5247 			 */
   5248 			if (sc->sc_type >= WM_T_PCH)
   5249 				reg |= CTRL_EXT_PHYPDEN;
   5250 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5251 
   5252 			/* TARC1 */
   5253 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5254 			/* bit 28 */
   5255 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5256 				tarc1 &= ~__BIT(28);
   5257 			else
   5258 				tarc1 |= __BIT(28);
   5259 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5260 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5261 
   5262 			/* Device Status */
   5263 			if (sc->sc_type == WM_T_ICH8) {
   5264 				reg = CSR_READ(sc, WMREG_STATUS);
   5265 				reg &= ~__BIT(31);
   5266 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5267 
   5268 			}
   5269 
   5270 			/* IOSFPC */
   5271 			if (sc->sc_type == WM_T_PCH_SPT) {
   5272 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5273 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5274 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5275 			}
   5276 			/*
   5277 			 * Work-around descriptor data corruption issue during
   5278 			 * NFS v2 UDP traffic, just disable the NFS filtering
   5279 			 * capability.
   5280 			 */
   5281 			reg = CSR_READ(sc, WMREG_RFCTL);
   5282 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5283 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5284 			break;
   5285 		default:
   5286 			break;
   5287 		}
   5288 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5289 
   5290 		switch (sc->sc_type) {
   5291 		case WM_T_82571:
   5292 		case WM_T_82572:
   5293 		case WM_T_82573:
   5294 		case WM_T_80003:
   5295 		case WM_T_ICH8:
   5296 			/*
    5297 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
    5298 			 * others: avoid the RSS Hash Value bug.
   5299 			 */
   5300 			reg = CSR_READ(sc, WMREG_RFCTL);
    5301 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5302 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5303 			break;
   5304 		case WM_T_82574:
    5305 			/* Use extended Rx descriptors. */
   5306 			reg = CSR_READ(sc, WMREG_RFCTL);
   5307 			reg |= WMREG_RFCTL_EXSTEN;
   5308 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5309 			break;
   5310 		default:
   5311 			break;
   5312 		}
   5313 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5314 		/*
   5315 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5316 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5317 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5318 		 * Correctly by the Device"
   5319 		 *
   5320 		 * I354(C2000) Errata AVR53:
   5321 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5322 		 * Hang"
   5323 		 */
   5324 		reg = CSR_READ(sc, WMREG_RFCTL);
   5325 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5326 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5327 	}
   5328 }
   5329 
   5330 static uint32_t
   5331 wm_rxpbs_adjust_82580(uint32_t val)
   5332 {
   5333 	uint32_t rv = 0;
   5334 
   5335 	if (val < __arraycount(wm_82580_rxpbs_table))
   5336 		rv = wm_82580_rxpbs_table[val];
   5337 
   5338 	return rv;
   5339 }
   5340 
   5341 /*
   5342  * wm_reset_phy:
   5343  *
   5344  *	generic PHY reset function.
   5345  *	Same as e1000_phy_hw_reset_generic()
   5346  */
   5347 static int
   5348 wm_reset_phy(struct wm_softc *sc)
   5349 {
   5350 	uint32_t reg;
   5351 	int rv;
   5352 
   5353 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5354 		device_xname(sc->sc_dev), __func__));
   5355 	if (wm_phy_resetisblocked(sc))
   5356 		return -1;
   5357 
   5358 	rv = sc->phy.acquire(sc);
   5359 	if (rv) {
   5360 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5361 		    __func__, rv);
   5362 		return rv;
   5363 	}
   5364 
   5365 	reg = CSR_READ(sc, WMREG_CTRL);
   5366 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5367 	CSR_WRITE_FLUSH(sc);
   5368 
   5369 	delay(sc->phy.reset_delay_us);
   5370 
   5371 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5372 	CSR_WRITE_FLUSH(sc);
   5373 
   5374 	delay(150);
   5375 
   5376 	sc->phy.release(sc);
   5377 
   5378 	wm_get_cfg_done(sc);
   5379 	wm_phy_post_reset(sc);
   5380 
   5381 	return 0;
   5382 }
   5383 
   5384 /*
   5385  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5386  *
    5387  * On the I219, the descriptor rings must be emptied before resetting the HW
   5388  * or before changing the device state to D3 during runtime (runtime PM).
   5389  *
   5390  * Failure to do this will cause the HW to enter a unit hang state which can
   5391  * only be released by PCI reset on the device.
   5392  *
   5393  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5394  */
   5395 static void
   5396 wm_flush_desc_rings(struct wm_softc *sc)
   5397 {
   5398 	pcireg_t preg;
   5399 	uint32_t reg;
   5400 	struct wm_txqueue *txq;
   5401 	wiseman_txdesc_t *txd;
   5402 	int nexttx;
   5403 	uint32_t rctl;
   5404 
   5405 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5406 
   5407 	/* First, disable MULR fix in FEXTNVM11 */
   5408 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5409 	reg |= FEXTNVM11_DIS_MULRFIX;
   5410 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5411 
   5412 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5413 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5414 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5415 		return;
   5416 
   5417 	/*
   5418 	 * Remove all descriptors from the tx_ring.
   5419 	 *
    5420 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    5421 	 * happens when the HW reads the regs. We assign the ring itself as
    5422 	 * the data of the next descriptor. We don't care about the data;
    5423 	 * we are about to reset the HW anyway.
   5424 	 */
   5425 #ifdef WM_DEBUG
   5426 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5427 #endif
   5428 	reg = CSR_READ(sc, WMREG_TCTL);
   5429 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5430 
   5431 	txq = &sc->sc_queue[0].wmq_txq;
   5432 	nexttx = txq->txq_next;
   5433 	txd = &txq->txq_descs[nexttx];
   5434 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5435 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5436 	txd->wtx_fields.wtxu_status = 0;
   5437 	txd->wtx_fields.wtxu_options = 0;
   5438 	txd->wtx_fields.wtxu_vlan = 0;
   5439 
   5440 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5441 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5442 
   5443 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5444 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5445 	CSR_WRITE_FLUSH(sc);
   5446 	delay(250);
   5447 
   5448 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5449 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5450 		return;
   5451 
   5452 	/*
   5453 	 * Mark all descriptors in the RX ring as consumed and disable the
   5454 	 * rx ring.
   5455 	 */
   5456 #ifdef WM_DEBUG
   5457 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5458 #endif
   5459 	rctl = CSR_READ(sc, WMREG_RCTL);
   5460 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5461 	CSR_WRITE_FLUSH(sc);
   5462 	delay(150);
   5463 
   5464 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5465 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5466 	reg &= 0xffffc000;
   5467 	/*
   5468 	 * Update thresholds: prefetch threshold to 31, host threshold
   5469 	 * to 1 and make sure the granularity is "descriptors" and not
   5470 	 * "cache lines"
   5471 	 */
   5472 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5473 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5474 
   5475 	/* Momentarily enable the RX ring for the changes to take effect */
   5476 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5477 	CSR_WRITE_FLUSH(sc);
   5478 	delay(150);
   5479 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5480 }
   5481 
   5482 /*
   5483  * wm_reset:
   5484  *
   5485  *	Reset the i82542 chip.
   5486  */
   5487 static void
   5488 wm_reset(struct wm_softc *sc)
   5489 {
   5490 	int phy_reset = 0;
   5491 	int i, error = 0;
   5492 	uint32_t reg;
   5493 	uint16_t kmreg;
   5494 	int rv;
   5495 
   5496 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5497 		device_xname(sc->sc_dev), __func__));
   5498 	KASSERT(sc->sc_type != 0);
   5499 
   5500 	/*
   5501 	 * Allocate on-chip memory according to the MTU size.
   5502 	 * The Packet Buffer Allocation register must be written
   5503 	 * before the chip is reset.
   5504 	 */
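         	/*
         	 * Arithmetic sketch (82547 case below): with a jumbo MTU
         	 * (> 8192) the Rx buffer gets PBA_22K, leaving a
         	 * (PBA_40K - PBA_22K) = 18KB on-chip Tx FIFO.
         	 */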
   5505 	switch (sc->sc_type) {
   5506 	case WM_T_82547:
   5507 	case WM_T_82547_2:
   5508 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5509 		    PBA_22K : PBA_30K;
   5510 		for (i = 0; i < sc->sc_nqueues; i++) {
   5511 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5512 			txq->txq_fifo_head = 0;
   5513 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5514 			txq->txq_fifo_size =
   5515 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5516 			txq->txq_fifo_stall = 0;
   5517 		}
   5518 		break;
   5519 	case WM_T_82571:
   5520 	case WM_T_82572:
    5521 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5522 	case WM_T_80003:
   5523 		sc->sc_pba = PBA_32K;
   5524 		break;
   5525 	case WM_T_82573:
   5526 		sc->sc_pba = PBA_12K;
   5527 		break;
   5528 	case WM_T_82574:
   5529 	case WM_T_82583:
   5530 		sc->sc_pba = PBA_20K;
   5531 		break;
   5532 	case WM_T_82576:
   5533 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5534 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5535 		break;
   5536 	case WM_T_82580:
   5537 	case WM_T_I350:
   5538 	case WM_T_I354:
   5539 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5540 		break;
   5541 	case WM_T_I210:
   5542 	case WM_T_I211:
   5543 		sc->sc_pba = PBA_34K;
   5544 		break;
   5545 	case WM_T_ICH8:
   5546 		/* Workaround for a bit corruption issue in FIFO memory */
   5547 		sc->sc_pba = PBA_8K;
   5548 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5549 		break;
   5550 	case WM_T_ICH9:
   5551 	case WM_T_ICH10:
   5552 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5553 		    PBA_14K : PBA_10K;
   5554 		break;
   5555 	case WM_T_PCH:
   5556 	case WM_T_PCH2:	/* XXX 14K? */
   5557 	case WM_T_PCH_LPT:
   5558 	case WM_T_PCH_SPT:
   5559 	case WM_T_PCH_CNP:
   5560 	case WM_T_PCH_TGP:
   5561 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5562 		    PBA_12K : PBA_26K;
   5563 		break;
   5564 	default:
   5565 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5566 		    PBA_40K : PBA_48K;
   5567 		break;
   5568 	}
   5569 	/*
    5570 	 * Only old or non-multiqueue devices have the PBA register.
   5571 	 * XXX Need special handling for 82575.
   5572 	 */
   5573 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5574 	    || (sc->sc_type == WM_T_82575))
   5575 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5576 
   5577 	/* Prevent the PCI-E bus from sticking */
   5578 	if (sc->sc_flags & WM_F_PCIE) {
   5579 		int timeout = 800;
   5580 
   5581 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5582 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5583 
   5584 		while (timeout--) {
   5585 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5586 			    == 0)
   5587 				break;
   5588 			delay(100);
   5589 		}
   5590 		if (timeout == 0)
   5591 			device_printf(sc->sc_dev,
   5592 			    "failed to disable bus mastering\n");
   5593 	}
   5594 
   5595 	/* Set the completion timeout for interface */
   5596 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5597 	    || (sc->sc_type == WM_T_82580)
   5598 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5599 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5600 		wm_set_pcie_completion_timeout(sc);
   5601 
   5602 	/* Clear interrupt */
   5603 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5604 	if (wm_is_using_msix(sc)) {
   5605 		if (sc->sc_type != WM_T_82574) {
   5606 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5607 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5608 		} else
   5609 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5610 	}
   5611 
   5612 	/* Stop the transmit and receive processes. */
   5613 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5614 	sc->sc_rctl &= ~RCTL_EN;
   5615 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5616 	CSR_WRITE_FLUSH(sc);
   5617 
   5618 	/* XXX set_tbi_sbp_82543() */
   5619 
   5620 	delay(10*1000);
   5621 
   5622 	/* Must acquire the MDIO ownership before MAC reset */
   5623 	switch (sc->sc_type) {
   5624 	case WM_T_82573:
   5625 	case WM_T_82574:
   5626 	case WM_T_82583:
   5627 		error = wm_get_hw_semaphore_82573(sc);
   5628 		break;
   5629 	default:
   5630 		break;
   5631 	}
   5632 
   5633 	/*
   5634 	 * 82541 Errata 29? & 82547 Errata 28?
   5635 	 * See also the description about PHY_RST bit in CTRL register
   5636 	 * in 8254x_GBe_SDM.pdf.
   5637 	 */
   5638 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5639 		CSR_WRITE(sc, WMREG_CTRL,
   5640 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5641 		CSR_WRITE_FLUSH(sc);
   5642 		delay(5000);
   5643 	}
   5644 
   5645 	switch (sc->sc_type) {
   5646 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5647 	case WM_T_82541:
   5648 	case WM_T_82541_2:
   5649 	case WM_T_82547:
   5650 	case WM_T_82547_2:
   5651 		/*
   5652 		 * On some chipsets, a reset through a memory-mapped write
   5653 		 * cycle can cause the chip to reset before completing the
    5654 		 * write cycle. This causes major headaches that can be avoided
   5655 		 * by issuing the reset via indirect register writes through
   5656 		 * I/O space.
   5657 		 *
   5658 		 * So, if we successfully mapped the I/O BAR at attach time,
   5659 		 * use that. Otherwise, try our luck with a memory-mapped
   5660 		 * reset.
   5661 		 */
   5662 		if (sc->sc_flags & WM_F_IOH_VALID)
   5663 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5664 		else
   5665 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5666 		break;
   5667 	case WM_T_82545_3:
   5668 	case WM_T_82546_3:
   5669 		/* Use the shadow control register on these chips. */
   5670 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5671 		break;
   5672 	case WM_T_80003:
   5673 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5674 		if (sc->phy.acquire(sc) != 0)
   5675 			break;
   5676 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5677 		sc->phy.release(sc);
   5678 		break;
   5679 	case WM_T_ICH8:
   5680 	case WM_T_ICH9:
   5681 	case WM_T_ICH10:
   5682 	case WM_T_PCH:
   5683 	case WM_T_PCH2:
   5684 	case WM_T_PCH_LPT:
   5685 	case WM_T_PCH_SPT:
   5686 	case WM_T_PCH_CNP:
   5687 	case WM_T_PCH_TGP:
   5688 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5689 		if (wm_phy_resetisblocked(sc) == false) {
   5690 			/*
   5691 			 * Gate automatic PHY configuration by hardware on
   5692 			 * non-managed 82579
   5693 			 */
   5694 			if ((sc->sc_type == WM_T_PCH2)
   5695 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5696 				== 0))
   5697 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5698 
   5699 			reg |= CTRL_PHY_RESET;
   5700 			phy_reset = 1;
   5701 		} else
   5702 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5703 		if (sc->phy.acquire(sc) != 0)
   5704 			break;
   5705 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5706 		/* Don't insert a completion barrier when resetting */
   5707 		delay(20*1000);
   5708 		/*
   5709 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5710 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5711 		 * only. See also wm_get_swflag_ich8lan().
   5712 		 */
   5713 		mutex_exit(sc->sc_ich_phymtx);
   5714 		break;
   5715 	case WM_T_82580:
   5716 	case WM_T_I350:
   5717 	case WM_T_I354:
   5718 	case WM_T_I210:
   5719 	case WM_T_I211:
   5720 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5721 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5722 			CSR_WRITE_FLUSH(sc);
   5723 		delay(5000);
   5724 		break;
   5725 	case WM_T_82542_2_0:
   5726 	case WM_T_82542_2_1:
   5727 	case WM_T_82543:
   5728 	case WM_T_82540:
   5729 	case WM_T_82545:
   5730 	case WM_T_82546:
   5731 	case WM_T_82571:
   5732 	case WM_T_82572:
   5733 	case WM_T_82573:
   5734 	case WM_T_82574:
   5735 	case WM_T_82575:
   5736 	case WM_T_82576:
   5737 	case WM_T_82583:
   5738 	default:
   5739 		/* Everything else can safely use the documented method. */
   5740 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5741 		break;
   5742 	}
   5743 
   5744 	/* Must release the MDIO ownership after MAC reset */
   5745 	switch (sc->sc_type) {
   5746 	case WM_T_82573:
   5747 	case WM_T_82574:
   5748 	case WM_T_82583:
   5749 		if (error == 0)
   5750 			wm_put_hw_semaphore_82573(sc);
   5751 		break;
   5752 	default:
   5753 		break;
   5754 	}
   5755 
   5756 	/* Set Phy Config Counter to 50msec */
   5757 	if (sc->sc_type == WM_T_PCH2) {
   5758 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5759 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5760 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5761 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5762 	}
   5763 
   5764 	if (phy_reset != 0)
   5765 		wm_get_cfg_done(sc);
   5766 
   5767 	/* Reload EEPROM */
   5768 	switch (sc->sc_type) {
   5769 	case WM_T_82542_2_0:
   5770 	case WM_T_82542_2_1:
   5771 	case WM_T_82543:
   5772 	case WM_T_82544:
   5773 		delay(10);
   5774 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5776 		CSR_WRITE_FLUSH(sc);
   5777 		delay(2000);
   5778 		break;
   5779 	case WM_T_82540:
   5780 	case WM_T_82545:
   5781 	case WM_T_82545_3:
   5782 	case WM_T_82546:
   5783 	case WM_T_82546_3:
   5784 		delay(5*1000);
   5785 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5786 		break;
   5787 	case WM_T_82541:
   5788 	case WM_T_82541_2:
   5789 	case WM_T_82547:
   5790 	case WM_T_82547_2:
   5791 		delay(20000);
   5792 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5793 		break;
   5794 	case WM_T_82571:
   5795 	case WM_T_82572:
   5796 	case WM_T_82573:
   5797 	case WM_T_82574:
   5798 	case WM_T_82583:
   5799 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5800 			delay(10);
   5801 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5802 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5803 			CSR_WRITE_FLUSH(sc);
   5804 		}
   5805 		/* check EECD_EE_AUTORD */
   5806 		wm_get_auto_rd_done(sc);
   5807 		/*
    5808 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
   5809 		 * is set.
   5810 		 */
   5811 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5812 		    || (sc->sc_type == WM_T_82583))
   5813 			delay(25*1000);
   5814 		break;
   5815 	case WM_T_82575:
   5816 	case WM_T_82576:
   5817 	case WM_T_82580:
   5818 	case WM_T_I350:
   5819 	case WM_T_I354:
   5820 	case WM_T_I210:
   5821 	case WM_T_I211:
   5822 	case WM_T_80003:
   5823 		/* check EECD_EE_AUTORD */
   5824 		wm_get_auto_rd_done(sc);
   5825 		break;
   5826 	case WM_T_ICH8:
   5827 	case WM_T_ICH9:
   5828 	case WM_T_ICH10:
   5829 	case WM_T_PCH:
   5830 	case WM_T_PCH2:
   5831 	case WM_T_PCH_LPT:
   5832 	case WM_T_PCH_SPT:
   5833 	case WM_T_PCH_CNP:
   5834 	case WM_T_PCH_TGP:
   5835 		break;
   5836 	default:
   5837 		panic("%s: unknown type\n", __func__);
   5838 	}
   5839 
   5840 	/* Check whether EEPROM is present or not */
   5841 	switch (sc->sc_type) {
   5842 	case WM_T_82575:
   5843 	case WM_T_82576:
   5844 	case WM_T_82580:
   5845 	case WM_T_I350:
   5846 	case WM_T_I354:
   5847 	case WM_T_ICH8:
   5848 	case WM_T_ICH9:
   5849 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5850 			/* Not found */
   5851 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5852 			if (sc->sc_type == WM_T_82575)
   5853 				wm_reset_init_script_82575(sc);
   5854 		}
   5855 		break;
   5856 	default:
   5857 		break;
   5858 	}
   5859 
   5860 	if (phy_reset != 0)
   5861 		wm_phy_post_reset(sc);
   5862 
   5863 	if ((sc->sc_type == WM_T_82580)
   5864 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5865 		/* Clear global device reset status bit */
   5866 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5867 	}
   5868 
   5869 	/* Clear any pending interrupt events. */
   5870 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5871 	reg = CSR_READ(sc, WMREG_ICR);
   5872 	if (wm_is_using_msix(sc)) {
   5873 		if (sc->sc_type != WM_T_82574) {
   5874 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5875 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5876 		} else
   5877 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5878 	}
   5879 
   5880 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5881 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5882 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5883 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   5884 	    || (sc->sc_type == WM_T_PCH_TGP)) {
   5885 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5886 		reg |= KABGTXD_BGSQLBIAS;
   5887 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5888 	}
   5889 
   5890 	/* Reload sc_ctrl */
   5891 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5892 
   5893 	wm_set_eee(sc);
   5894 
   5895 	/*
   5896 	 * For PCH, this write will make sure that any noise will be detected
   5897 	 * as a CRC error and be dropped rather than show up as a bad packet
   5898 	 * to the DMA engine
   5899 	 */
   5900 	if (sc->sc_type == WM_T_PCH)
   5901 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5902 
   5903 	if (sc->sc_type >= WM_T_82544)
   5904 		CSR_WRITE(sc, WMREG_WUC, 0);
   5905 
   5906 	if (sc->sc_type < WM_T_82575)
   5907 		wm_disable_aspm(sc); /* Workaround for some chips */
   5908 
   5909 	wm_reset_mdicnfg_82580(sc);
   5910 
   5911 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5912 		wm_pll_workaround_i210(sc);
   5913 
   5914 	if (sc->sc_type == WM_T_80003) {
   5915 		/* Default to TRUE to enable the MDIC W/A */
   5916 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5917 
   5918 		rv = wm_kmrn_readreg(sc,
   5919 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5920 		if (rv == 0) {
   5921 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5922 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5923 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5924 			else
   5925 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5926 		}
   5927 	}
   5928 }
   5929 
   5930 /*
   5931  * wm_add_rxbuf:
   5932  *
    5933  *	Add a receive buffer to the indicated descriptor.
   5934  */
   5935 static int
   5936 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5937 {
   5938 	struct wm_softc *sc = rxq->rxq_sc;
   5939 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5940 	struct mbuf *m;
   5941 	int error;
   5942 
   5943 	KASSERT(mutex_owned(rxq->rxq_lock));
   5944 
   5945 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5946 	if (m == NULL)
   5947 		return ENOBUFS;
   5948 
   5949 	MCLGET(m, M_DONTWAIT);
   5950 	if ((m->m_flags & M_EXT) == 0) {
   5951 		m_freem(m);
   5952 		return ENOBUFS;
   5953 	}
   5954 
   5955 	if (rxs->rxs_mbuf != NULL)
   5956 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5957 
   5958 	rxs->rxs_mbuf = m;
   5959 
   5960 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5961 	/*
   5962 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5963 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5964 	 */
   5965 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5966 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5967 	if (error) {
   5968 		/* XXX XXX XXX */
   5969 		aprint_error_dev(sc->sc_dev,
   5970 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5971 		panic("wm_add_rxbuf");
   5972 	}
   5973 
   5974 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5975 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5976 
   5977 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5978 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5979 			wm_init_rxdesc(rxq, idx);
   5980 	} else
   5981 		wm_init_rxdesc(rxq, idx);
   5982 
   5983 	return 0;
   5984 }
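
/*
 * Illustration of the comment inside wm_add_rxbuf() above (not a statement
 * from the datasheet): the DMA map is loaded over the whole cluster starting
 * at ext_buf, so even after m_data is later advanced by sc_align_tweak
 * bytes, a bus_dmamap_sync() over dm_mapsize still covers all received data.
 */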
   5985 
   5986 /*
   5987  * wm_rxdrain:
   5988  *
   5989  *	Drain the receive queue.
   5990  */
   5991 static void
   5992 wm_rxdrain(struct wm_rxqueue *rxq)
   5993 {
   5994 	struct wm_softc *sc = rxq->rxq_sc;
   5995 	struct wm_rxsoft *rxs;
   5996 	int i;
   5997 
   5998 	KASSERT(mutex_owned(rxq->rxq_lock));
   5999 
   6000 	for (i = 0; i < WM_NRXDESC; i++) {
   6001 		rxs = &rxq->rxq_soft[i];
   6002 		if (rxs->rxs_mbuf != NULL) {
   6003 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   6004 			m_freem(rxs->rxs_mbuf);
   6005 			rxs->rxs_mbuf = NULL;
   6006 		}
   6007 	}
   6008 }
   6009 
   6010 /*
   6011  * Setup registers for RSS.
   6012  *
    6013  * XXX VMDq is not supported yet.
   6014  */
   6015 static void
   6016 wm_init_rss(struct wm_softc *sc)
   6017 {
   6018 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   6019 	int i;
   6020 
   6021 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   6022 
   6023 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   6024 		unsigned int qid, reta_ent;
   6025 
   6026 		qid  = i % sc->sc_nqueues;
   6027 		switch (sc->sc_type) {
   6028 		case WM_T_82574:
   6029 			reta_ent = __SHIFTIN(qid,
   6030 			    RETA_ENT_QINDEX_MASK_82574);
   6031 			break;
   6032 		case WM_T_82575:
   6033 			reta_ent = __SHIFTIN(qid,
   6034 			    RETA_ENT_QINDEX1_MASK_82575);
   6035 			break;
   6036 		default:
   6037 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   6038 			break;
   6039 		}
   6040 
   6041 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   6042 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   6043 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   6044 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6045 	}
   6046 
   6047 	rss_getkey((uint8_t *)rss_key);
   6048 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6049 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6050 
   6051 	if (sc->sc_type == WM_T_82574)
   6052 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6053 	else
   6054 		mrqc = MRQC_ENABLE_RSS_MQ;
   6055 
   6056 	/*
    6057 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   6058 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6059 	 */
   6060 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6061 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6062 #if 0
   6063 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6064 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6065 #endif
   6066 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6067 
   6068 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6069 }
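
/*
 * A sketch of how the hardware is expected to use the tables programmed
 * above (the usual Intel RSS scheme; illustration only, not driver code):
 *
 *	hash = Toeplitz(rss_key, packet tuple);
 *	qid  = reta[hash % RETA_NUM_ENTRIES];	(low bits of the 32-bit hash)
 *
 * With sc_nqueues == 4, the loop above fills the redirection table with the
 * repeating pattern 0,1,2,3,... so flows are spread evenly over the queues.
 */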
   6070 
   6071 /*
    6072  * Adjust the TX and RX queue numbers which the system actually uses.
    6073  *
    6074  * The numbers are affected by the following parameters:
    6075  *     - The number of hardware queues
   6076  *     - The number of MSI-X vectors (= "nvectors" argument)
   6077  *     - ncpu
   6078  */
   6079 static void
   6080 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6081 {
   6082 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6083 
   6084 	if (nvectors < 2) {
   6085 		sc->sc_nqueues = 1;
   6086 		return;
   6087 	}
   6088 
   6089 	switch (sc->sc_type) {
   6090 	case WM_T_82572:
   6091 		hw_ntxqueues = 2;
   6092 		hw_nrxqueues = 2;
   6093 		break;
   6094 	case WM_T_82574:
   6095 		hw_ntxqueues = 2;
   6096 		hw_nrxqueues = 2;
   6097 		break;
   6098 	case WM_T_82575:
   6099 		hw_ntxqueues = 4;
   6100 		hw_nrxqueues = 4;
   6101 		break;
   6102 	case WM_T_82576:
   6103 		hw_ntxqueues = 16;
   6104 		hw_nrxqueues = 16;
   6105 		break;
   6106 	case WM_T_82580:
   6107 	case WM_T_I350:
   6108 	case WM_T_I354:
   6109 		hw_ntxqueues = 8;
   6110 		hw_nrxqueues = 8;
   6111 		break;
   6112 	case WM_T_I210:
   6113 		hw_ntxqueues = 4;
   6114 		hw_nrxqueues = 4;
   6115 		break;
   6116 	case WM_T_I211:
   6117 		hw_ntxqueues = 2;
   6118 		hw_nrxqueues = 2;
   6119 		break;
   6120 		/*
    6121 		 * The following Ethernet controllers do not support MSI-X;
   6122 		 * this driver doesn't let them use multiqueue.
   6123 		 *     - WM_T_80003
   6124 		 *     - WM_T_ICH8
   6125 		 *     - WM_T_ICH9
   6126 		 *     - WM_T_ICH10
   6127 		 *     - WM_T_PCH
   6128 		 *     - WM_T_PCH2
   6129 		 *     - WM_T_PCH_LPT
   6130 		 */
   6131 	default:
   6132 		hw_ntxqueues = 1;
   6133 		hw_nrxqueues = 1;
   6134 		break;
   6135 	}
   6136 
   6137 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6138 
   6139 	/*
    6140 	 * Since using more queues than MSI-X vectors cannot improve
    6141 	 * scaling, limit the number of queues actually used.
   6142 	 */
   6143 	if (nvectors < hw_nqueues + 1)
   6144 		sc->sc_nqueues = nvectors - 1;
   6145 	else
   6146 		sc->sc_nqueues = hw_nqueues;
   6147 
   6148 	/*
    6149 	 * Likewise, since using more queues than CPUs cannot improve
    6150 	 * scaling, limit the number of queues actually used.
   6151 	 */
   6152 	if (ncpu < sc->sc_nqueues)
   6153 		sc->sc_nqueues = ncpu;
   6154 }
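
/*
 * Worked example for wm_adjust_qnum() (illustration only): an 82576 has
 * 16 TX and 16 RX hardware queues, so hw_nqueues = 16.  With nvectors == 5
 * and ncpu == 8, one vector is reserved for the link interrupt, giving
 * sc_nqueues = min(nvectors - 1, hw_nqueues, ncpu) = 4.
 */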
   6155 
   6156 static inline bool
   6157 wm_is_using_msix(struct wm_softc *sc)
   6158 {
   6159 
   6160 	return (sc->sc_nintrs > 1);
   6161 }
   6162 
   6163 static inline bool
   6164 wm_is_using_multiqueue(struct wm_softc *sc)
   6165 {
   6166 
   6167 	return (sc->sc_nqueues > 1);
   6168 }
   6169 
   6170 static int
   6171 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6172 {
   6173 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6174 
   6175 	wmq->wmq_id = qidx;
   6176 	wmq->wmq_intr_idx = intr_idx;
   6177 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6178 	    wm_handle_queue, wmq);
   6179 	if (wmq->wmq_si != NULL)
   6180 		return 0;
   6181 
   6182 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6183 	    wmq->wmq_id);
   6184 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6185 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6186 	return ENOMEM;
   6187 }
   6188 
   6189 /*
   6190  * Both single interrupt MSI and INTx can use this function.
   6191  */
   6192 static int
   6193 wm_setup_legacy(struct wm_softc *sc)
   6194 {
   6195 	pci_chipset_tag_t pc = sc->sc_pc;
   6196 	const char *intrstr = NULL;
   6197 	char intrbuf[PCI_INTRSTR_LEN];
   6198 	int error;
   6199 
   6200 	error = wm_alloc_txrx_queues(sc);
   6201 	if (error) {
   6202 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6203 		    error);
   6204 		return ENOMEM;
   6205 	}
   6206 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6207 	    sizeof(intrbuf));
   6208 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6209 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6210 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6211 	if (sc->sc_ihs[0] == NULL) {
   6212 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6213 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6214 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6215 		return ENOMEM;
   6216 	}
   6217 
   6218 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6219 	sc->sc_nintrs = 1;
   6220 
   6221 	return wm_softint_establish_queue(sc, 0, 0);
   6222 }
   6223 
   6224 static int
   6225 wm_setup_msix(struct wm_softc *sc)
   6226 {
   6227 	void *vih;
   6228 	kcpuset_t *affinity;
   6229 	int qidx, error, intr_idx, txrx_established;
   6230 	pci_chipset_tag_t pc = sc->sc_pc;
   6231 	const char *intrstr = NULL;
   6232 	char intrbuf[PCI_INTRSTR_LEN];
   6233 	char intr_xname[INTRDEVNAMEBUF];
   6234 
   6235 	if (sc->sc_nqueues < ncpu) {
   6236 		/*
    6237 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6238 		 * interrupts starts from CPU#1.
   6239 		 */
   6240 		sc->sc_affinity_offset = 1;
   6241 	} else {
   6242 		/*
    6243 		 * In this case, this device uses all CPUs, so we unify the
    6244 		 * affinity cpu_index with the MSI-X vector number for readability.
   6245 		 */
   6246 		sc->sc_affinity_offset = 0;
   6247 	}
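
	/*
	 * Example (illustration only): with sc_nqueues == 4 and ncpu == 8,
	 * sc_affinity_offset == 1, so the TXRX vectors 0..3 established
	 * below are bound to CPUs (1 + intr_idx) % 8, i.e. CPU#1..CPU#4,
	 * leaving CPU#0 for other devices' interrupts.
	 */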
   6248 
   6249 	error = wm_alloc_txrx_queues(sc);
   6250 	if (error) {
   6251 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6252 		    error);
   6253 		return ENOMEM;
   6254 	}
   6255 
   6256 	kcpuset_create(&affinity, false);
   6257 	intr_idx = 0;
   6258 
   6259 	/*
   6260 	 * TX and RX
   6261 	 */
   6262 	txrx_established = 0;
   6263 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6264 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6265 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6266 
   6267 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6268 		    sizeof(intrbuf));
   6269 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6270 		    PCI_INTR_MPSAFE, true);
   6271 		memset(intr_xname, 0, sizeof(intr_xname));
   6272 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6273 		    device_xname(sc->sc_dev), qidx);
   6274 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6275 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6276 		if (vih == NULL) {
   6277 			aprint_error_dev(sc->sc_dev,
   6278 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6279 			    intrstr ? " at " : "",
   6280 			    intrstr ? intrstr : "");
   6281 
   6282 			goto fail;
   6283 		}
   6284 		kcpuset_zero(affinity);
   6285 		/* Round-robin affinity */
   6286 		kcpuset_set(affinity, affinity_to);
   6287 		error = interrupt_distribute(vih, affinity, NULL);
   6288 		if (error == 0) {
   6289 			aprint_normal_dev(sc->sc_dev,
   6290 			    "for TX and RX interrupting at %s affinity to %u\n",
   6291 			    intrstr, affinity_to);
   6292 		} else {
   6293 			aprint_normal_dev(sc->sc_dev,
   6294 			    "for TX and RX interrupting at %s\n", intrstr);
   6295 		}
   6296 		sc->sc_ihs[intr_idx] = vih;
   6297 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6298 			goto fail;
   6299 		txrx_established++;
   6300 		intr_idx++;
   6301 	}
   6302 
   6303 	/* LINK */
   6304 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6305 	    sizeof(intrbuf));
   6306 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6307 	memset(intr_xname, 0, sizeof(intr_xname));
   6308 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6309 	    device_xname(sc->sc_dev));
   6310 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6311 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6312 	if (vih == NULL) {
   6313 		aprint_error_dev(sc->sc_dev,
   6314 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6315 		    intrstr ? " at " : "",
   6316 		    intrstr ? intrstr : "");
   6317 
   6318 		goto fail;
   6319 	}
   6320 	/* Keep default affinity to LINK interrupt */
   6321 	aprint_normal_dev(sc->sc_dev,
   6322 	    "for LINK interrupting at %s\n", intrstr);
   6323 	sc->sc_ihs[intr_idx] = vih;
   6324 	sc->sc_link_intr_idx = intr_idx;
   6325 
   6326 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6327 	kcpuset_destroy(affinity);
   6328 	return 0;
   6329 
   6330 fail:
   6331 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6332 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6333 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6334 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6335 	}
   6336 
   6337 	kcpuset_destroy(affinity);
   6338 	return ENOMEM;
   6339 }
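
/*
 * Resulting MSI-X vector layout (illustration only): with sc_nqueues == 4,
 * vectors 0..3 are the per-queue TXRX interrupts and vector 4 is the LINK
 * interrupt, so sc_nintrs == sc_nqueues + 1 == 5.
 */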
   6340 
   6341 static void
   6342 wm_unset_stopping_flags(struct wm_softc *sc)
   6343 {
   6344 	int i;
   6345 
   6346 	KASSERT(mutex_owned(sc->sc_core_lock));
   6347 
   6348 	/* Must unset stopping flags in ascending order. */
   6349 	for (i = 0; i < sc->sc_nqueues; i++) {
   6350 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6351 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6352 
   6353 		mutex_enter(txq->txq_lock);
   6354 		txq->txq_stopping = false;
   6355 		mutex_exit(txq->txq_lock);
   6356 
   6357 		mutex_enter(rxq->rxq_lock);
   6358 		rxq->rxq_stopping = false;
   6359 		mutex_exit(rxq->rxq_lock);
   6360 	}
   6361 
   6362 	sc->sc_core_stopping = false;
   6363 }
   6364 
   6365 static void
   6366 wm_set_stopping_flags(struct wm_softc *sc)
   6367 {
   6368 	int i;
   6369 
   6370 	KASSERT(mutex_owned(sc->sc_core_lock));
   6371 
   6372 	sc->sc_core_stopping = true;
   6373 
   6374 	/* Must set stopping flags in ascending order. */
   6375 	for (i = 0; i < sc->sc_nqueues; i++) {
   6376 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6377 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6378 
   6379 		mutex_enter(rxq->rxq_lock);
   6380 		rxq->rxq_stopping = true;
   6381 		mutex_exit(rxq->rxq_lock);
   6382 
   6383 		mutex_enter(txq->txq_lock);
   6384 		txq->txq_stopping = true;
   6385 		mutex_exit(txq->txq_lock);
   6386 	}
   6387 }
   6388 
   6389 /*
   6390  * Write interrupt interval value to ITR or EITR
   6391  */
   6392 static void
   6393 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6394 {
   6395 
   6396 	if (!wmq->wmq_set_itr)
   6397 		return;
   6398 
   6399 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6400 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6401 
   6402 		/*
    6403 		 * The 82575 doesn't have the CNT_INGR field,
    6404 		 * so overwrite the counter field in software.
   6405 		 */
   6406 		if (sc->sc_type == WM_T_82575)
   6407 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6408 			    EITR_COUNTER_MASK_82575);
   6409 		else
   6410 			eitr |= EITR_CNT_INGR;
   6411 
   6412 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6413 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6414 		/*
    6415 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6416 		 * the multiqueue function with MSI-X.
   6417 		 */
   6418 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6419 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6420 	} else {
   6421 		KASSERT(wmq->wmq_id == 0);
   6422 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6423 	}
   6424 
   6425 	wmq->wmq_set_itr = false;
   6426 }
   6427 
   6428 /*
   6429  * TODO
    6430  * The dynamic ITR calculation below is almost the same as Linux igb,
    6431  * but it does not fit wm(4) well, so AIM stays disabled until we find
    6432  * an appropriate ITR calculation.
   6433  */
   6434 /*
    6435  * Calculate the interrupt interval value to be written by
    6436  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6437  */
   6438 static void
   6439 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6440 {
   6441 #ifdef NOTYET
   6442 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6443 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6444 	uint32_t avg_size = 0;
   6445 	uint32_t new_itr;
   6446 
   6447 	if (rxq->rxq_packets)
   6448 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6449 	if (txq->txq_packets)
   6450 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6451 
   6452 	if (avg_size == 0) {
   6453 		new_itr = 450; /* restore default value */
   6454 		goto out;
   6455 	}
   6456 
   6457 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6458 	avg_size += 24;
   6459 
   6460 	/* Don't starve jumbo frames */
   6461 	avg_size = uimin(avg_size, 3000);
   6462 
   6463 	/* Give a little boost to mid-size frames */
   6464 	if ((avg_size > 300) && (avg_size < 1200))
   6465 		new_itr = avg_size / 3;
   6466 	else
   6467 		new_itr = avg_size / 2;
   6468 
   6469 out:
   6470 	/*
    6471 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6472 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6473 	 */
   6474 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6475 		new_itr *= 4;
   6476 
   6477 	if (new_itr != wmq->wmq_itr) {
   6478 		wmq->wmq_itr = new_itr;
   6479 		wmq->wmq_set_itr = true;
   6480 	} else
   6481 		wmq->wmq_set_itr = false;
   6482 
   6483 	rxq->rxq_packets = 0;
   6484 	rxq->rxq_bytes = 0;
   6485 	txq->txq_packets = 0;
   6486 	txq->txq_bytes = 0;
   6487 #endif
   6488 }
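
/*
 * Worked example for the (disabled) AIM calculation above (illustration
 * only): for a stream of 1500-byte frames, avg_size = 1500 + 24 = 1524,
 * which is capped at uimin(1524, 3000) = 1524 and falls outside the
 * (300, 1200) mid-size boost range, so new_itr = 1524 / 2 = 762; on all
 * controllers except the NEWQUEUE 82575 this is then scaled by 4 to 3048.
 */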
   6489 
   6490 static void
   6491 wm_init_sysctls(struct wm_softc *sc)
   6492 {
   6493 	struct sysctllog **log;
   6494 	const struct sysctlnode *rnode, *qnode, *cnode;
   6495 	int i, rv;
   6496 	const char *dvname;
   6497 
   6498 	log = &sc->sc_sysctllog;
   6499 	dvname = device_xname(sc->sc_dev);
   6500 
   6501 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6502 	    0, CTLTYPE_NODE, dvname,
   6503 	    SYSCTL_DESCR("wm information and settings"),
   6504 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6505 	if (rv != 0)
   6506 		goto err;
   6507 
   6508 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6509 	    CTLTYPE_BOOL, "txrx_workqueue",
   6510 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6511 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6512 	if (rv != 0)
   6513 		goto teardown;
   6514 
   6515 	for (i = 0; i < sc->sc_nqueues; i++) {
   6516 		struct wm_queue *wmq = &sc->sc_queue[i];
   6517 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6518 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6519 
   6520 		snprintf(sc->sc_queue[i].sysctlname,
   6521 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6522 
   6523 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6524 		    0, CTLTYPE_NODE,
   6525 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6526 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6527 			break;
   6528 
   6529 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6530 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6531 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6532 		    NULL, 0, &txq->txq_free,
   6533 		    0, CTL_CREATE, CTL_EOL) != 0)
   6534 			break;
   6535 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6536 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6537 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6538 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6539 		    0, CTL_CREATE, CTL_EOL) != 0)
   6540 			break;
   6541 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6542 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6543 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6544 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6545 		    0, CTL_CREATE, CTL_EOL) != 0)
   6546 			break;
   6547 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6548 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6549 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6550 		    NULL, 0, &txq->txq_next,
   6551 		    0, CTL_CREATE, CTL_EOL) != 0)
   6552 			break;
   6553 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6554 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6555 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6556 		    NULL, 0, &txq->txq_sfree,
   6557 		    0, CTL_CREATE, CTL_EOL) != 0)
   6558 			break;
   6559 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6560 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6561 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6562 		    NULL, 0, &txq->txq_snext,
   6563 		    0, CTL_CREATE, CTL_EOL) != 0)
   6564 			break;
   6565 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6566 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6567 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6568 		    NULL, 0, &txq->txq_sdirty,
   6569 		    0, CTL_CREATE, CTL_EOL) != 0)
   6570 			break;
   6571 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6572 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6573 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6574 		    NULL, 0, &txq->txq_flags,
   6575 		    0, CTL_CREATE, CTL_EOL) != 0)
   6576 			break;
   6577 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6578 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6579 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6580 		    NULL, 0, &txq->txq_stopping,
   6581 		    0, CTL_CREATE, CTL_EOL) != 0)
   6582 			break;
   6583 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6584 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6585 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6586 		    NULL, 0, &txq->txq_sending,
   6587 		    0, CTL_CREATE, CTL_EOL) != 0)
   6588 			break;
   6589 
   6590 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6591 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6592 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6593 		    NULL, 0, &rxq->rxq_ptr,
   6594 		    0, CTL_CREATE, CTL_EOL) != 0)
   6595 			break;
   6596 	}
   6597 
   6598 #ifdef WM_DEBUG
   6599 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6600 	    CTLTYPE_INT, "debug_flags",
   6601 	    SYSCTL_DESCR(
   6602 		    "Debug flags:\n"	\
   6603 		    "\t0x01 LINK\n"	\
   6604 		    "\t0x02 TX\n"	\
   6605 		    "\t0x04 RX\n"	\
   6606 		    "\t0x08 GMII\n"	\
   6607 		    "\t0x10 MANAGE\n"	\
   6608 		    "\t0x20 NVM\n"	\
   6609 		    "\t0x40 INIT\n"	\
   6610 		    "\t0x80 LOCK"),
   6611 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6612 	if (rv != 0)
   6613 		goto teardown;
   6614 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6615 	    CTLTYPE_BOOL, "trigger_reset",
   6616 	    SYSCTL_DESCR("Trigger an interface reset"),
   6617 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6618 	if (rv != 0)
   6619 		goto teardown;
   6620 #endif
   6621 
   6622 	return;
   6623 
   6624 teardown:
   6625 	sysctl_teardown(log);
   6626 err:
   6627 	sc->sc_sysctllog = NULL;
   6628 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6629 	    __func__, rv);
   6630 }
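
/*
 * The nodes created above can be inspected with sysctl(8); for example
 * (assuming the device attached as wm0):
 *
 *	sysctl hw.wm0.txrx_workqueue
 *	sysctl hw.wm0.q0.txq_free
 */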
   6631 
   6632 static void
   6633 wm_update_stats(struct wm_softc *sc)
   6634 {
   6635 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6636 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   6637 	    cexterr;
   6638 
   6639 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   6640 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   6641 	mpc = CSR_READ(sc, WMREG_MPC);
   6642 	colc = CSR_READ(sc, WMREG_COLC);
   6643 	sec = CSR_READ(sc, WMREG_SEC);
   6644 	rlec = CSR_READ(sc, WMREG_RLEC);
   6645 
   6646 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   6647 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   6648 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   6649 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   6650 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   6651 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   6652 
   6653 	if (sc->sc_type >= WM_T_82543) {
   6654 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   6655 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   6656 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   6657 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   6658 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   6659 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   6660 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   6661 		} else {
   6662 			cexterr = 0;
   6663 			/* Excessive collision + Link down */
   6664 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   6665 			    CSR_READ(sc, WMREG_HTDPMC));
   6666 		}
   6667 
   6668 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   6669 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   6670 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6671 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   6672 			    CSR_READ(sc, WMREG_TSCTFC));
   6673 		else {
   6674 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
   6675 			    CSR_READ(sc, WMREG_CBRDPC));
   6676 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   6677 			    CSR_READ(sc, WMREG_CBRMPC));
   6678 		}
   6679 	} else
   6680 		algnerrc = rxerrc = cexterr = 0;
   6681 
   6682 	if (sc->sc_type >= WM_T_82542_2_1) {
   6683 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   6684 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   6685 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   6686 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   6687 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   6688 	}
   6689 
   6690 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   6691 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   6692 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   6693 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   6694 
   6695 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6696 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   6697 	}
   6698 
   6699 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   6700 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   6701 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   6702 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   6703 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   6704 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   6705 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   6706 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   6707 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   6708 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   6709 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   6710 
   6711 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   6712 	    CSR_READ(sc, WMREG_GORCL) +
   6713 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   6714 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   6715 	    CSR_READ(sc, WMREG_GOTCL) +
   6716 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   6717 
   6718 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   6719 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   6720 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   6721 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   6722 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   6723 
   6724 	if (sc->sc_type >= WM_T_82540) {
   6725 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   6726 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   6727 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   6728 	}
   6729 
   6730 	/*
   6731 	 * The TOR(L) register includes:
   6732 	 *  - Error
   6733 	 *  - Flow control
   6734 	 *  - Broadcast rejected (This note is described in 82574 and newer
   6735 	 *    datasheets. What does "broadcast rejected" mean?)
   6736 	 */
   6737 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   6738 	    CSR_READ(sc, WMREG_TORL) +
   6739 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   6740 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   6741 	    CSR_READ(sc, WMREG_TOTL) +
   6742 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   6743 
   6744 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   6745 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   6746 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   6747 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   6748 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   6749 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   6750 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   6751 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   6752 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   6753 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   6754 	if (sc->sc_type >= WM_T_82571)
   6755 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   6756 	if (sc->sc_type < WM_T_82575) {
   6757 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   6758 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   6759 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   6760 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
   6761 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   6762 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   6763 		    CSR_READ(sc, WMREG_ICTXQMTC));
   6764 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   6765 		    CSR_READ(sc, WMREG_ICRXDMTC));
   6766 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   6767 	} else if (!WM_IS_ICHPCH(sc)) {
   6768 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   6769 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   6770 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   6771 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   6772 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   6773 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   6774 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   6775 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   6776 
   6777 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   6778 		    CSR_READ(sc, WMREG_HGORCL) +
   6779 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   6780 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   6781 		    CSR_READ(sc, WMREG_HGOTCL) +
   6782 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   6783 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   6784 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   6785 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   6786 	}
   6787 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6788 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   6789 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   6790 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   6791 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   6792 			    CSR_READ(sc, WMREG_B2OGPRC));
   6793 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   6794 			    CSR_READ(sc, WMREG_O2BSPC));
   6795 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   6796 			    CSR_READ(sc, WMREG_B2OSPC));
   6797 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   6798 			    CSR_READ(sc, WMREG_O2BGPTC));
   6799 		}
   6800 	}
   6801 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   6802 	if_statadd_ref(nsr, if_collisions, colc);
   6803 	if_statadd_ref(nsr, if_ierrors,
   6804 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   6805 	/*
    6806 	 * WMREG_RNBC is incremented when there are no available buffers in
    6807 	 * host memory. It is not the number of dropped packets, because an
    6808 	 * Ethernet controller can still receive packets in that case if
    6809 	 * there is space in the PHY's FIFO.
    6810 	 *
    6811 	 * If you want to track the WMREG_RNBC count, use its own EVCNT
    6812 	 * instead of if_iqdrops.
   6813 	 */
   6814 	if_statadd_ref(nsr, if_iqdrops, mpc);
   6815 	IF_STAT_PUTREF(ifp);
   6816 }
   6817 
   6818 void
   6819 wm_clear_evcnt(struct wm_softc *sc)
   6820 {
   6821 #ifdef WM_EVENT_COUNTERS
   6822 	int i;
   6823 
   6824 	/* RX queues */
   6825 	for (i = 0; i < sc->sc_nqueues; i++) {
   6826 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6827 
   6828 		WM_Q_EVCNT_STORE(rxq, intr, 0);
   6829 		WM_Q_EVCNT_STORE(rxq, defer, 0);
   6830 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
   6831 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
   6832 	}
   6833 
   6834 	/* TX queues */
   6835 	for (i = 0; i < sc->sc_nqueues; i++) {
   6836 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6837 		int j;
   6838 
   6839 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
   6840 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
   6841 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
   6842 		WM_Q_EVCNT_STORE(txq, txdw, 0);
   6843 		WM_Q_EVCNT_STORE(txq, txqe, 0);
   6844 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
   6845 		WM_Q_EVCNT_STORE(txq, tusum, 0);
   6846 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
   6847 		WM_Q_EVCNT_STORE(txq, tso, 0);
   6848 		WM_Q_EVCNT_STORE(txq, tso6, 0);
   6849 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
   6850 
   6851 		for (j = 0; j < WM_NTXSEGS; j++)
   6852 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
   6853 
   6854 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
   6855 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
   6856 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
   6857 		WM_Q_EVCNT_STORE(txq, defrag, 0);
   6858 		if (sc->sc_type <= WM_T_82544)
   6859 			WM_Q_EVCNT_STORE(txq, underrun, 0);
   6860 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
   6861 	}
   6862 
   6863 	/* Miscs */
   6864 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
   6865 
   6866 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
   6867 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
   6868 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
   6869 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
   6870 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
   6871 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
   6872 
   6873 	if (sc->sc_type >= WM_T_82543) {
   6874 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
   6875 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
   6876 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6877 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
   6878 		else
   6879 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
   6880 
   6881 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
   6882 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
   6883 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   6884 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
   6885 		else {
   6886 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
   6887 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
   6888 		}
   6889 	}
   6890 
   6891 	if (sc->sc_type >= WM_T_82542_2_1) {
   6892 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
   6893 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
   6894 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
   6895 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
   6896 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
   6897 	}
   6898 
   6899 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
   6900 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
   6901 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
   6902 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
   6903 
   6904 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   6905 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
   6906 
   6907 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
   6908 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
   6909 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
   6910 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
   6911 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
   6912 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
   6913 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
   6914 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
   6915 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
   6916 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
   6917 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
   6918 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
   6919 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
   6920 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
   6921 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
   6922 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
   6923 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
   6924 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
   6925 	if (sc->sc_type >= WM_T_82540) {
   6926 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
   6927 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
   6928 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
   6929 	}
   6930 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
   6931 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
   6932 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
   6933 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
   6934 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
   6935 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
   6936 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
   6937 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
   6938 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
   6939 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
   6940 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
   6941 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
   6942 	if (sc->sc_type >= WM_T_82571)
   6943 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
   6944 	if (sc->sc_type < WM_T_82575) {
   6945 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
   6946 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
   6947 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
   6948 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
   6949 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
   6950 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
   6951 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6952 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
   6953 	} else if (!WM_IS_ICHPCH(sc)) {
   6954 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
   6955 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
   6956 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
   6957 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
   6958 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
   6959 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
   6960 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
   6961 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
   6962 
   6963 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
   6964 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
   6965 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
   6966 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
   6967 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
   6968 	}
   6969 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   6970 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
   6971 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
   6972 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
   6973 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
   6974 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
   6975 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
   6976 	}
   6977 #endif
   6978 }
   6979 
   6980 /*
   6981  * wm_init:		[ifnet interface function]
   6982  *
   6983  *	Initialize the interface.
   6984  */
   6985 static int
   6986 wm_init(struct ifnet *ifp)
   6987 {
   6988 	struct wm_softc *sc = ifp->if_softc;
   6989 	int ret;
   6990 
   6991 	KASSERT(IFNET_LOCKED(ifp));
   6992 
   6993 	if (sc->sc_dying)
   6994 		return ENXIO;
   6995 
   6996 	mutex_enter(sc->sc_core_lock);
   6997 	ret = wm_init_locked(ifp);
   6998 	mutex_exit(sc->sc_core_lock);
   6999 
   7000 	return ret;
   7001 }
   7002 
   7003 static int
   7004 wm_init_locked(struct ifnet *ifp)
   7005 {
   7006 	struct wm_softc *sc = ifp->if_softc;
   7007 	struct ethercom *ec = &sc->sc_ethercom;
   7008 	int i, j, trynum, error = 0;
   7009 	uint32_t reg, sfp_mask = 0;
   7010 
   7011 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7012 		device_xname(sc->sc_dev), __func__));
   7013 	KASSERT(IFNET_LOCKED(ifp));
   7014 	KASSERT(mutex_owned(sc->sc_core_lock));
   7015 
   7016 	/*
    7017 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    7018 	 * There is a small but measurable benefit to avoiding the adjustment
   7019 	 * of the descriptor so that the headers are aligned, for normal mtu,
   7020 	 * on such platforms.  One possibility is that the DMA itself is
   7021 	 * slightly more efficient if the front of the entire packet (instead
   7022 	 * of the front of the headers) is aligned.
   7023 	 *
   7024 	 * Note we must always set align_tweak to 0 if we are using
   7025 	 * jumbo frames.
   7026 	 */
   7027 #ifdef __NO_STRICT_ALIGNMENT
   7028 	sc->sc_align_tweak = 0;
   7029 #else
   7030 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   7031 		sc->sc_align_tweak = 0;
   7032 	else
   7033 		sc->sc_align_tweak = 2;
   7034 #endif /* __NO_STRICT_ALIGNMENT */
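
	/*
	 * Example of the tweak (illustration only): with sc_align_tweak == 2,
	 * the RX buffer pointer handed to the chip is ext_buf + 2, so the
	 * 14-byte Ethernet header ends on a 4-byte boundary and the IP
	 * header that follows is 32-bit aligned, as strict-alignment
	 * platforms require.
	 */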
   7035 
   7036 	/* Cancel any pending I/O. */
   7037 	wm_stop_locked(ifp, false, false);
   7038 
   7039 	/* Update statistics before reset */
   7040 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   7041 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   7042 
   7043 	/* >= PCH_SPT hardware workaround before reset. */
   7044 	if (sc->sc_type >= WM_T_PCH_SPT)
   7045 		wm_flush_desc_rings(sc);
   7046 
   7047 	/* Reset the chip to a known state. */
   7048 	wm_reset(sc);
   7049 
   7050 	/*
   7051 	 * AMT based hardware can now take control from firmware
   7052 	 * Do this after reset.
   7053 	 */
   7054 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   7055 		wm_get_hw_control(sc);
   7056 
   7057 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   7058 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   7059 		wm_legacy_irq_quirk_spt(sc);
   7060 
   7061 	/* Init hardware bits */
   7062 	wm_initialize_hardware_bits(sc);
   7063 
   7064 	/* Reset the PHY. */
   7065 	if (sc->sc_flags & WM_F_HAS_MII)
   7066 		wm_gmii_reset(sc);
   7067 
   7068 	if (sc->sc_type >= WM_T_ICH8) {
   7069 		reg = CSR_READ(sc, WMREG_GCR);
   7070 		/*
   7071 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   7072 		 * default after reset.
   7073 		 */
   7074 		if (sc->sc_type == WM_T_ICH8)
   7075 			reg |= GCR_NO_SNOOP_ALL;
   7076 		else
   7077 			reg &= ~GCR_NO_SNOOP_ALL;
   7078 		CSR_WRITE(sc, WMREG_GCR, reg);
   7079 	}
   7080 
   7081 	/* Ungate DMA clock to avoid packet loss */
   7082 	if (sc->sc_type >= WM_T_PCH_TGP) {
   7083 		reg = CSR_READ(sc, WMREG_FFLT_DBG);
   7084 		reg |= (1 << 12);
   7085 		CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
   7086 	}
   7087 
   7088 	if ((sc->sc_type >= WM_T_ICH8)
   7089 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   7090 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   7091 
   7092 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7093 		reg |= CTRL_EXT_RO_DIS;
   7094 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7095 	}
   7096 
   7097 	/* Calculate (E)ITR value */
   7098 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   7099 		/*
   7100 		 * For NEWQUEUE's EITR (except for 82575).
    7101 		 * The 82575's EITR should be set to the same throttling
    7102 		 * value as other old controllers' ITR because the
    7103 		 * interrupt/sec calculation is the same: 1,000,000,000 / (N * 256).
    7104 		 *
    7105 		 * The 82574's EITR should be set to the same throttling value as ITR.
   7106 		 *
   7107 		 * For N interrupts/sec, set this value to:
   7108 		 * 1,000,000 / N in contrast to ITR throttling value.
   7109 		 */
   7110 		sc->sc_itr_init = 450;
   7111 	} else if (sc->sc_type >= WM_T_82543) {
   7112 		/*
   7113 		 * Set up the interrupt throttling register (units of 256ns)
   7114 		 * Note that a footnote in Intel's documentation says this
   7115 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   7116 		 * or 10Mbit mode.  Empirically, it appears to be the case
   7117 		 * that that is also true for the 1024ns units of the other
   7118 		 * interrupt-related timer registers -- so, really, we ought
   7119 		 * to divide this value by 4 when the link speed is low.
   7120 		 *
   7121 		 * XXX implement this division at link speed change!
   7122 		 */
   7123 
   7124 		/*
   7125 		 * For N interrupts/sec, set this value to:
   7126 		 * 1,000,000,000 / (N * 256).  Note that we set the
   7127 		 * absolute and packet timer values to this value
   7128 		 * divided by 4 to get "simple timer" behavior.
   7129 		 */
   7130 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   7131 	}
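
	/*
	 * Worked examples of the formulas above (illustration only):
	 * NEWQUEUE EITR: sc_itr_init = 450 gives about
	 * 1,000,000 / 450 = 2222 interrupts/sec; legacy ITR:
	 * sc_itr_init = 1500 gives 1,000,000,000 / (1500 * 256) = 2604
	 * interrupts/sec, matching the "2604 ints/sec" note above.
	 */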
   7132 
   7133 	error = wm_init_txrx_queues(sc);
   7134 	if (error)
   7135 		goto out;
   7136 
   7137 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   7138 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   7139 	    (sc->sc_type >= WM_T_82575))
   7140 		wm_serdes_power_up_link_82575(sc);
   7141 
   7142 	/* Clear out the VLAN table -- we don't use it (yet). */
   7143 	CSR_WRITE(sc, WMREG_VET, 0);
   7144 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   7145 		trynum = 10; /* Due to hw errata */
   7146 	else
   7147 		trynum = 1;
   7148 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   7149 		for (j = 0; j < trynum; j++)
   7150 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   7151 
   7152 	/*
   7153 	 * Set up flow-control parameters.
   7154 	 *
   7155 	 * XXX Values could probably stand some tuning.
   7156 	 */
   7157 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   7158 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   7159 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   7160 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
   7161 	    && (sc->sc_type != WM_T_PCH_TGP)) {
   7162 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   7163 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   7164 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   7165 	}
   7166 
   7167 	sc->sc_fcrtl = FCRTL_DFLT;
   7168 	if (sc->sc_type < WM_T_82543) {
   7169 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   7170 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   7171 	} else {
   7172 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   7173 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   7174 	}
   7175 
   7176 	if (sc->sc_type == WM_T_80003)
   7177 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   7178 	else
   7179 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   7180 
   7181 	/* Writes the control register. */
   7182 	wm_set_vlan(sc);
   7183 
   7184 	if (sc->sc_flags & WM_F_HAS_MII) {
   7185 		uint16_t kmreg;
   7186 
   7187 		switch (sc->sc_type) {
   7188 		case WM_T_80003:
   7189 		case WM_T_ICH8:
   7190 		case WM_T_ICH9:
   7191 		case WM_T_ICH10:
   7192 		case WM_T_PCH:
   7193 		case WM_T_PCH2:
   7194 		case WM_T_PCH_LPT:
   7195 		case WM_T_PCH_SPT:
   7196 		case WM_T_PCH_CNP:
   7197 		case WM_T_PCH_TGP:
   7198 			/*
   7199 			 * Set the mac to wait the maximum time between each
   7200 			 * iteration and increase the max iterations when
   7201 			 * polling the phy; this fixes erroneous timeouts at
   7202 			 * 10Mbps.
   7203 			 */
   7204 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   7205 			    0xFFFF);
   7206 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7207 			    &kmreg);
   7208 			kmreg |= 0x3F;
   7209 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   7210 			    kmreg);
   7211 			break;
   7212 		default:
   7213 			break;
   7214 		}
   7215 
   7216 		if (sc->sc_type == WM_T_80003) {
   7217 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7218 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   7219 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7220 
   7221 			/* Bypass RX and TX FIFOs */
   7222 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   7223 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   7224 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   7225 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   7226 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   7227 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   7228 		}
   7229 	}
   7230 #if 0
   7231 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   7232 #endif
   7233 
   7234 	/* Set up checksum offload parameters. */
   7235 	reg = CSR_READ(sc, WMREG_RXCSUM);
   7236 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   7237 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   7238 		reg |= RXCSUM_IPOFL;
   7239 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   7240 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   7241 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   7242 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   7243 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7244 
   7245 	/* Set registers about MSI-X */
   7246 	if (wm_is_using_msix(sc)) {
   7247 		uint32_t ivar, qintr_idx;
   7248 		struct wm_queue *wmq;
   7249 		unsigned int qid;
   7250 
   7251 		if (sc->sc_type == WM_T_82575) {
   7252 			/* Interrupt control */
   7253 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7254 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   7255 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7256 
   7257 			/* TX and RX */
   7258 			for (i = 0; i < sc->sc_nqueues; i++) {
   7259 				wmq = &sc->sc_queue[i];
   7260 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   7261 				    EITR_TX_QUEUE(wmq->wmq_id)
   7262 				    | EITR_RX_QUEUE(wmq->wmq_id));
   7263 			}
   7264 			/* Link status */
   7265 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   7266 			    EITR_OTHER);
   7267 		} else if (sc->sc_type == WM_T_82574) {
   7268 			/* Interrupt control */
   7269 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7270 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   7271 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7272 
   7273 			/*
   7274 			 * Work around issue with spurious interrupts
   7275 			 * in MSI-X mode.
    7276 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    7277 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   7278 			 */
   7279 			reg = CSR_READ(sc, WMREG_RFCTL);
   7280 			reg |= WMREG_RFCTL_ACKDIS;
   7281 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   7282 
   7283 			ivar = 0;
   7284 			/* TX and RX */
   7285 			for (i = 0; i < sc->sc_nqueues; i++) {
   7286 				wmq = &sc->sc_queue[i];
   7287 				qid = wmq->wmq_id;
   7288 				qintr_idx = wmq->wmq_intr_idx;
   7289 
   7290 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7291 				    IVAR_TX_MASK_Q_82574(qid));
   7292 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7293 				    IVAR_RX_MASK_Q_82574(qid));
   7294 			}
   7295 			/* Link status */
   7296 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7297 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7298 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7299 		} else {
   7300 			/* Interrupt control */
   7301 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7302 			    | GPIE_EIAME | GPIE_PBA);
   7303 
   7304 			switch (sc->sc_type) {
   7305 			case WM_T_82580:
   7306 			case WM_T_I350:
   7307 			case WM_T_I354:
   7308 			case WM_T_I210:
   7309 			case WM_T_I211:
   7310 				/* TX and RX */
   7311 				for (i = 0; i < sc->sc_nqueues; i++) {
   7312 					wmq = &sc->sc_queue[i];
   7313 					qid = wmq->wmq_id;
   7314 					qintr_idx = wmq->wmq_intr_idx;
   7315 
   7316 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7317 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7318 					ivar |= __SHIFTIN((qintr_idx
   7319 						| IVAR_VALID),
   7320 					    IVAR_TX_MASK_Q(qid));
   7321 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7322 					ivar |= __SHIFTIN((qintr_idx
   7323 						| IVAR_VALID),
   7324 					    IVAR_RX_MASK_Q(qid));
   7325 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7326 				}
   7327 				break;
   7328 			case WM_T_82576:
   7329 				/* TX and RX */
   7330 				for (i = 0; i < sc->sc_nqueues; i++) {
   7331 					wmq = &sc->sc_queue[i];
   7332 					qid = wmq->wmq_id;
   7333 					qintr_idx = wmq->wmq_intr_idx;
   7334 
   7335 					ivar = CSR_READ(sc,
   7336 					    WMREG_IVAR_Q_82576(qid));
   7337 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7338 					ivar |= __SHIFTIN((qintr_idx
   7339 						| IVAR_VALID),
   7340 					    IVAR_TX_MASK_Q_82576(qid));
   7341 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7342 					ivar |= __SHIFTIN((qintr_idx
   7343 						| IVAR_VALID),
   7344 					    IVAR_RX_MASK_Q_82576(qid));
   7345 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7346 					    ivar);
   7347 				}
   7348 				break;
   7349 			default:
   7350 				break;
   7351 			}
   7352 
   7353 			/* Link status */
   7354 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7355 			    IVAR_MISC_OTHER);
   7356 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7357 		}
   7358 
   7359 		if (wm_is_using_multiqueue(sc)) {
   7360 			wm_init_rss(sc);
   7361 
    7362 			/*
    7363 			 * NOTE: Receive Full-Packet Checksum Offload is
    7364 			 * mutually exclusive with Multiqueue. However, this
    7365 			 * is not the same as the TCP/IP checksum offloads,
    7366 			 * which still work.
    7367 			 */
   7368 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7369 			reg |= RXCSUM_PCSD;
   7370 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7371 		}
   7372 	}
   7373 
   7374 	/* Set up the interrupt registers. */
   7375 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7376 
   7377 	/* Enable SFP module insertion interrupt if it's required */
   7378 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7379 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7380 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7381 		sfp_mask = ICR_GPI(0);
   7382 	}
   7383 
   7384 	if (wm_is_using_msix(sc)) {
   7385 		uint32_t mask;
   7386 		struct wm_queue *wmq;
   7387 
   7388 		switch (sc->sc_type) {
   7389 		case WM_T_82574:
   7390 			mask = 0;
   7391 			for (i = 0; i < sc->sc_nqueues; i++) {
   7392 				wmq = &sc->sc_queue[i];
   7393 				mask |= ICR_TXQ(wmq->wmq_id);
   7394 				mask |= ICR_RXQ(wmq->wmq_id);
   7395 			}
   7396 			mask |= ICR_OTHER;
   7397 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7398 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7399 			break;
   7400 		default:
   7401 			if (sc->sc_type == WM_T_82575) {
   7402 				mask = 0;
   7403 				for (i = 0; i < sc->sc_nqueues; i++) {
   7404 					wmq = &sc->sc_queue[i];
   7405 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7406 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7407 				}
   7408 				mask |= EITR_OTHER;
   7409 			} else {
   7410 				mask = 0;
   7411 				for (i = 0; i < sc->sc_nqueues; i++) {
   7412 					wmq = &sc->sc_queue[i];
   7413 					mask |= 1 << wmq->wmq_intr_idx;
   7414 				}
   7415 				mask |= 1 << sc->sc_link_intr_idx;
   7416 			}
   7417 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7418 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7419 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7420 
   7421 			/* For other interrupts */
   7422 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7423 			break;
   7424 		}
   7425 	} else {
   7426 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7427 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7428 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7429 	}
   7430 
   7431 	/* Set up the inter-packet gap. */
   7432 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7433 
   7434 	if (sc->sc_type >= WM_T_82543) {
   7435 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7436 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7437 			wm_itrs_writereg(sc, wmq);
   7438 		}
   7439 		/*
    7440 		 * Link interrupts occur much less often than TX and RX
    7441 		 * interrupts, so we don't tune the EITR(WM_MSIX_LINKINTR_IDX)
    7442 		 * value as FreeBSD's if_igb does.
   7444 		 */
   7445 	}
   7446 
   7447 	/* Set the VLAN EtherType. */
   7448 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7449 
   7450 	/*
   7451 	 * Set up the transmit control register; we start out with
   7452 	 * a collision distance suitable for FDX, but update it when
   7453 	 * we resolve the media type.
   7454 	 */
   7455 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7456 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7457 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7458 	if (sc->sc_type >= WM_T_82571)
   7459 		sc->sc_tctl |= TCTL_MULR;
   7460 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7461 
   7462 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7463 		/* Write TDT after TCTL.EN is set. See the document. */
   7464 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7465 	}
   7466 
   7467 	if (sc->sc_type == WM_T_80003) {
   7468 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7469 		reg &= ~TCTL_EXT_GCEX_MASK;
   7470 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7471 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7472 	}
   7473 
   7474 	/* Set the media. */
   7475 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7476 		goto out;
   7477 
   7478 	/* Configure for OS presence */
   7479 	wm_init_manageability(sc);
   7480 
   7481 	/*
   7482 	 * Set up the receive control register; we actually program the
   7483 	 * register when we set the receive filter. Use multicast address
   7484 	 * offset type 0.
   7485 	 *
   7486 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7487 	 * don't enable that feature.
   7488 	 */
   7489 	sc->sc_mchash_type = 0;
   7490 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7491 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7492 
    7493 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   7494 	if (sc->sc_type == WM_T_82574)
   7495 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7496 
   7497 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7498 		sc->sc_rctl |= RCTL_SECRC;
   7499 
   7500 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7501 	    && (ifp->if_mtu > ETHERMTU)) {
   7502 		sc->sc_rctl |= RCTL_LPE;
   7503 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7504 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7505 	}
   7506 
   7507 	if (MCLBYTES == 2048)
   7508 		sc->sc_rctl |= RCTL_2k;
   7509 	else {
   7510 		if (sc->sc_type >= WM_T_82543) {
   7511 			switch (MCLBYTES) {
   7512 			case 4096:
   7513 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7514 				break;
   7515 			case 8192:
   7516 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7517 				break;
   7518 			case 16384:
   7519 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7520 				break;
   7521 			default:
   7522 				panic("wm_init: MCLBYTES %d unsupported",
   7523 				    MCLBYTES);
   7524 				break;
   7525 			}
   7526 		} else
   7527 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7528 	}
   7529 
   7530 	/* Enable ECC */
   7531 	switch (sc->sc_type) {
   7532 	case WM_T_82571:
   7533 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7534 		reg |= PBA_ECC_CORR_EN;
   7535 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7536 		break;
   7537 	case WM_T_PCH_LPT:
   7538 	case WM_T_PCH_SPT:
   7539 	case WM_T_PCH_CNP:
   7540 	case WM_T_PCH_TGP:
   7541 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7542 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7543 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7544 
   7545 		sc->sc_ctrl |= CTRL_MEHE;
   7546 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7547 		break;
   7548 	default:
   7549 		break;
   7550 	}
   7551 
   7552 	/*
   7553 	 * Set the receive filter.
   7554 	 *
   7555 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7556 	 * the setting of RCTL.EN in wm_set_filter()
   7557 	 */
   7558 	wm_set_filter(sc);
   7559 
    7560 	/* On 82575 and later, set RDT only after RX is enabled */
   7561 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7562 		int qidx;
   7563 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7564 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7565 			for (i = 0; i < WM_NRXDESC; i++) {
   7566 				mutex_enter(rxq->rxq_lock);
   7567 				wm_init_rxdesc(rxq, i);
   7568 				mutex_exit(rxq->rxq_lock);
    7570 			}
   7571 		}
   7572 	}
   7573 
   7574 	wm_unset_stopping_flags(sc);
   7575 
   7576 	/* Start the one second link check clock. */
   7577 	callout_schedule(&sc->sc_tick_ch, hz);
   7578 
   7579 	/*
   7580 	 * ...all done! (IFNET_LOCKED asserted above.)
   7581 	 */
   7582 	ifp->if_flags |= IFF_RUNNING;
   7583 
   7584 out:
   7585 	/* Save last flags for the callback */
   7586 	sc->sc_if_flags = ifp->if_flags;
   7587 	sc->sc_ec_capenable = ec->ec_capenable;
   7588 	if (error)
   7589 		log(LOG_ERR, "%s: interface not running\n",
   7590 		    device_xname(sc->sc_dev));
   7591 	return error;
   7592 }
   7593 
   7594 /*
   7595  * wm_stop:		[ifnet interface function]
   7596  *
   7597  *	Stop transmission on the interface.
   7598  */
   7599 static void
   7600 wm_stop(struct ifnet *ifp, int disable)
   7601 {
   7602 	struct wm_softc *sc = ifp->if_softc;
   7603 
   7604 	ASSERT_SLEEPABLE();
   7605 	KASSERT(IFNET_LOCKED(ifp));
   7606 
   7607 	mutex_enter(sc->sc_core_lock);
    7608 	wm_stop_locked(ifp, disable != 0, true);
   7609 	mutex_exit(sc->sc_core_lock);
   7610 
    7611 	/*
    7612 	 * After wm_set_stopping_flags(), it is guaranteed that
    7613 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7614 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7615 	 * because it can sleep, so call it here instead.
    7616 	 */
   7618 	for (int i = 0; i < sc->sc_nqueues; i++)
   7619 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7620 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7621 }
   7622 
   7623 static void
   7624 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7625 {
   7626 	struct wm_softc *sc = ifp->if_softc;
   7627 	struct wm_txsoft *txs;
   7628 	int i, qidx;
   7629 
   7630 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7631 		device_xname(sc->sc_dev), __func__));
   7632 	KASSERT(IFNET_LOCKED(ifp));
   7633 	KASSERT(mutex_owned(sc->sc_core_lock));
   7634 
   7635 	wm_set_stopping_flags(sc);
   7636 
   7637 	if (sc->sc_flags & WM_F_HAS_MII) {
   7638 		/* Down the MII. */
   7639 		mii_down(&sc->sc_mii);
   7640 	} else {
   7641 #if 0
   7642 		/* Should we clear PHY's status properly? */
   7643 		wm_reset(sc);
   7644 #endif
   7645 	}
   7646 
   7647 	/* Stop the transmit and receive processes. */
   7648 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7649 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7650 	sc->sc_rctl &= ~RCTL_EN;
   7651 
   7652 	/*
   7653 	 * Clear the interrupt mask to ensure the device cannot assert its
   7654 	 * interrupt line.
   7655 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7656 	 * service any currently pending or shared interrupt.
   7657 	 */
   7658 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7659 	sc->sc_icr = 0;
   7660 	if (wm_is_using_msix(sc)) {
   7661 		if (sc->sc_type != WM_T_82574) {
   7662 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7663 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7664 		} else
   7665 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7666 	}
   7667 
   7668 	/*
   7669 	 * Stop callouts after interrupts are disabled; if we have
   7670 	 * to wait for them, we will be releasing the CORE_LOCK
   7671 	 * briefly, which will unblock interrupts on the current CPU.
   7672 	 */
   7673 
   7674 	/* Stop the one second clock. */
   7675 	if (wait)
   7676 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7677 	else
   7678 		callout_stop(&sc->sc_tick_ch);
   7679 
   7680 	/* Stop the 82547 Tx FIFO stall check timer. */
   7681 	if (sc->sc_type == WM_T_82547) {
   7682 		if (wait)
   7683 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7684 		else
   7685 			callout_stop(&sc->sc_txfifo_ch);
   7686 	}
   7687 
   7688 	/* Release any queued transmit buffers. */
   7689 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7690 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7691 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7692 		struct mbuf *m;
   7693 
   7694 		mutex_enter(txq->txq_lock);
   7695 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7696 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7697 			txs = &txq->txq_soft[i];
   7698 			if (txs->txs_mbuf != NULL) {
    7699 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7700 				m_freem(txs->txs_mbuf);
   7701 				txs->txs_mbuf = NULL;
   7702 			}
   7703 		}
   7704 		/* Drain txq_interq */
   7705 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7706 			m_freem(m);
   7707 		mutex_exit(txq->txq_lock);
   7708 	}
   7709 
   7710 	/* Mark the interface as down and cancel the watchdog timer. */
   7711 	ifp->if_flags &= ~IFF_RUNNING;
   7712 	sc->sc_if_flags = ifp->if_flags;
   7713 
   7714 	if (disable) {
   7715 		for (i = 0; i < sc->sc_nqueues; i++) {
   7716 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7717 			mutex_enter(rxq->rxq_lock);
   7718 			wm_rxdrain(rxq);
   7719 			mutex_exit(rxq->rxq_lock);
   7720 		}
   7721 	}
   7722 
   7723 #if 0 /* notyet */
   7724 	if (sc->sc_type >= WM_T_82544)
   7725 		CSR_WRITE(sc, WMREG_WUC, 0);
   7726 #endif
   7727 }
   7728 
   7729 static void
   7730 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7731 {
   7732 	struct mbuf *m;
   7733 	int i;
   7734 
   7735 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7736 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7737 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7738 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7739 		    m->m_data, m->m_len, m->m_flags);
   7740 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7741 	    i, i == 1 ? "" : "s");
   7742 }
   7743 
   7744 /*
   7745  * wm_82547_txfifo_stall:
   7746  *
   7747  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7748  *	reset the FIFO pointers, and restart packet transmission.
   7749  */
   7750 static void
   7751 wm_82547_txfifo_stall(void *arg)
   7752 {
   7753 	struct wm_softc *sc = arg;
   7754 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7755 
   7756 	mutex_enter(txq->txq_lock);
   7757 
   7758 	if (txq->txq_stopping)
   7759 		goto out;
   7760 
   7761 	if (txq->txq_fifo_stall) {
   7762 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7763 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7764 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7765 			/*
   7766 			 * Packets have drained.  Stop transmitter, reset
   7767 			 * FIFO pointers, restart transmitter, and kick
   7768 			 * the packet queue.
   7769 			 */
   7770 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7771 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7772 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7773 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7774 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7775 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7776 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7777 			CSR_WRITE_FLUSH(sc);
   7778 
   7779 			txq->txq_fifo_head = 0;
   7780 			txq->txq_fifo_stall = 0;
   7781 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7782 		} else {
   7783 			/*
   7784 			 * Still waiting for packets to drain; try again in
   7785 			 * another tick.
   7786 			 */
   7787 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7788 		}
   7789 	}
   7790 
   7791 out:
   7792 	mutex_exit(txq->txq_lock);
   7793 }
   7794 
   7795 /*
   7796  * wm_82547_txfifo_bugchk:
   7797  *
   7798  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7799  *	prevent enqueueing a packet that would wrap around the end
    7800  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7801  *
   7802  *	We do this by checking the amount of space before the end
   7803  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7804  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7805  *	the internal FIFO pointers to the beginning, and restart
   7806  *	transmission on the interface.
   7807  */
   7808 #define	WM_FIFO_HDR		0x10
   7809 #define	WM_82547_PAD_LEN	0x3e0
   7810 static int
   7811 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7812 {
   7813 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7814 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7815 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
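	/*
	 * len accounts for what is apparently a 16-byte (WM_FIFO_HDR)
	 * per-packet FIFO header, rounded up to the FIFO's 16-byte
	 * granularity; e.g. a 1514-byte frame consumes
	 * roundup(1514 + 16, 16) = 1536 bytes of FIFO space.
	 */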
   7816 
   7817 	/* Just return if already stalled. */
   7818 	if (txq->txq_fifo_stall)
   7819 		return 1;
   7820 
   7821 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7822 		/* Stall only occurs in half-duplex mode. */
   7823 		goto send_packet;
   7824 	}
   7825 
   7826 	if (len >= WM_82547_PAD_LEN + space) {
   7827 		txq->txq_fifo_stall = 1;
   7828 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7829 		return 1;
   7830 	}
   7831 
   7832 send_packet:
   7833 	txq->txq_fifo_head += len;
   7834 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7835 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7836 
   7837 	return 0;
   7838 }
   7839 
   7840 static int
   7841 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7842 {
   7843 	int error;
   7844 
   7845 	/*
   7846 	 * Allocate the control data structures, and create and load the
   7847 	 * DMA map for it.
   7848 	 *
   7849 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7850 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7851 	 * both sets within the same 4G segment.
   7852 	 */
   7853 	if (sc->sc_type < WM_T_82544)
   7854 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7855 	else
   7856 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7857 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7858 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7859 	else
   7860 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7861 
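	/*
	 * The (bus_size_t)0x100000000ULL argument below is the bus_dma
	 * boundary: the allocation must not cross a 4G address boundary,
	 * which enforces the constraint described above.
	 */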
   7862 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7863 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7864 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7865 		aprint_error_dev(sc->sc_dev,
   7866 		    "unable to allocate TX control data, error = %d\n",
   7867 		    error);
   7868 		goto fail_0;
   7869 	}
   7870 
   7871 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7872 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7873 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7874 		aprint_error_dev(sc->sc_dev,
   7875 		    "unable to map TX control data, error = %d\n", error);
   7876 		goto fail_1;
   7877 	}
   7878 
   7879 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7880 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7881 		aprint_error_dev(sc->sc_dev,
   7882 		    "unable to create TX control data DMA map, error = %d\n",
   7883 		    error);
   7884 		goto fail_2;
   7885 	}
   7886 
   7887 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7888 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7889 		aprint_error_dev(sc->sc_dev,
   7890 		    "unable to load TX control data DMA map, error = %d\n",
   7891 		    error);
   7892 		goto fail_3;
   7893 	}
   7894 
   7895 	return 0;
   7896 
   7897 fail_3:
   7898 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7899 fail_2:
   7900 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7901 	    WM_TXDESCS_SIZE(txq));
   7902 fail_1:
   7903 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7904 fail_0:
   7905 	return error;
   7906 }
   7907 
   7908 static void
   7909 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7910 {
   7911 
   7912 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7913 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7914 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7915 	    WM_TXDESCS_SIZE(txq));
   7916 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7917 }
   7918 
   7919 static int
   7920 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7921 {
   7922 	int error;
   7923 	size_t rxq_descs_size;
   7924 
   7925 	/*
   7926 	 * Allocate the control data structures, and create and load the
   7927 	 * DMA map for it.
   7928 	 *
   7929 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7930 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7931 	 * both sets within the same 4G segment.
   7932 	 */
   7933 	rxq->rxq_ndesc = WM_NRXDESC;
   7934 	if (sc->sc_type == WM_T_82574)
   7935 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7936 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7937 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7938 	else
   7939 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7940 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7941 
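	/*
	 * As in wm_alloc_tx_descs(), the 0x100000000ULL boundary argument
	 * keeps the allocation within a single 4G segment.
	 */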
   7942 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7943 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7944 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7945 		aprint_error_dev(sc->sc_dev,
   7946 		    "unable to allocate RX control data, error = %d\n",
   7947 		    error);
   7948 		goto fail_0;
   7949 	}
   7950 
   7951 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7952 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7953 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7954 		aprint_error_dev(sc->sc_dev,
   7955 		    "unable to map RX control data, error = %d\n", error);
   7956 		goto fail_1;
   7957 	}
   7958 
   7959 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7960 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7961 		aprint_error_dev(sc->sc_dev,
   7962 		    "unable to create RX control data DMA map, error = %d\n",
   7963 		    error);
   7964 		goto fail_2;
   7965 	}
   7966 
   7967 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7968 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7969 		aprint_error_dev(sc->sc_dev,
   7970 		    "unable to load RX control data DMA map, error = %d\n",
   7971 		    error);
   7972 		goto fail_3;
   7973 	}
   7974 
   7975 	return 0;
   7976 
   7977  fail_3:
   7978 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7979  fail_2:
   7980 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7981 	    rxq_descs_size);
   7982  fail_1:
   7983 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7984  fail_0:
   7985 	return error;
   7986 }
   7987 
   7988 static void
   7989 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7990 {
   7991 
   7992 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7993 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7994 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7995 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7996 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7997 }
   7998 
   7999 
   8000 static int
   8001 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   8002 {
   8003 	int i, error;
   8004 
   8005 	/* Create the transmit buffer DMA maps. */
   8006 	WM_TXQUEUELEN(txq) =
   8007 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   8008 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   8009 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8010 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   8011 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   8012 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   8013 			aprint_error_dev(sc->sc_dev,
   8014 			    "unable to create Tx DMA map %d, error = %d\n",
   8015 			    i, error);
   8016 			goto fail;
   8017 		}
   8018 	}
   8019 
   8020 	return 0;
   8021 
   8022 fail:
   8023 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8024 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8025 			bus_dmamap_destroy(sc->sc_dmat,
   8026 			    txq->txq_soft[i].txs_dmamap);
   8027 	}
   8028 	return error;
   8029 }
   8030 
   8031 static void
   8032 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   8033 {
   8034 	int i;
   8035 
   8036 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   8037 		if (txq->txq_soft[i].txs_dmamap != NULL)
   8038 			bus_dmamap_destroy(sc->sc_dmat,
   8039 			    txq->txq_soft[i].txs_dmamap);
   8040 	}
   8041 }
   8042 
   8043 static int
   8044 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8045 {
   8046 	int i, error;
   8047 
   8048 	/* Create the receive buffer DMA maps. */
   8049 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8050 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   8051 			    MCLBYTES, 0, 0,
   8052 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   8053 			aprint_error_dev(sc->sc_dev,
   8054 			    "unable to create Rx DMA map %d error = %d\n",
   8055 			    i, error);
   8056 			goto fail;
   8057 		}
   8058 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   8059 	}
   8060 
   8061 	return 0;
   8062 
   8063  fail:
   8064 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8065 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8066 			bus_dmamap_destroy(sc->sc_dmat,
   8067 			    rxq->rxq_soft[i].rxs_dmamap);
   8068 	}
   8069 	return error;
   8070 }
   8071 
   8072 static void
   8073 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8074 {
   8075 	int i;
   8076 
   8077 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8078 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   8079 			bus_dmamap_destroy(sc->sc_dmat,
   8080 			    rxq->rxq_soft[i].rxs_dmamap);
   8081 	}
   8082 }
   8083 
   8084 /*
    8085  * wm_alloc_txrx_queues:
   8086  *	Allocate {tx,rx}descs and {tx,rx} buffers
   8087  */
   8088 static int
   8089 wm_alloc_txrx_queues(struct wm_softc *sc)
   8090 {
   8091 	int i, error, tx_done, rx_done;
   8092 
   8093 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   8094 	    KM_SLEEP);
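	/*
	 * kmem_zalloc() with KM_SLEEP does not return NULL, so the
	 * following check is purely defensive.
	 */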
   8095 	if (sc->sc_queue == NULL) {
    8096 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   8097 		error = ENOMEM;
   8098 		goto fail_0;
   8099 	}
   8100 
   8101 	/* For transmission */
   8102 	error = 0;
   8103 	tx_done = 0;
   8104 	for (i = 0; i < sc->sc_nqueues; i++) {
   8105 #ifdef WM_EVENT_COUNTERS
   8106 		int j;
   8107 		const char *xname;
   8108 #endif
   8109 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8110 		txq->txq_sc = sc;
   8111 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8112 
   8113 		error = wm_alloc_tx_descs(sc, txq);
   8114 		if (error)
   8115 			break;
   8116 		error = wm_alloc_tx_buffer(sc, txq);
   8117 		if (error) {
   8118 			wm_free_tx_descs(sc, txq);
   8119 			break;
   8120 		}
   8121 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   8122 		if (txq->txq_interq == NULL) {
   8123 			wm_free_tx_descs(sc, txq);
   8124 			wm_free_tx_buffer(sc, txq);
   8125 			error = ENOMEM;
   8126 			break;
   8127 		}
   8128 
   8129 #ifdef WM_EVENT_COUNTERS
   8130 		xname = device_xname(sc->sc_dev);
   8131 
   8132 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   8133 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   8134 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   8135 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   8136 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   8137 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   8138 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   8139 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   8140 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   8141 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   8142 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   8143 
   8144 		for (j = 0; j < WM_NTXSEGS; j++) {
   8145 			snprintf(txq->txq_txseg_evcnt_names[j],
   8146 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   8147 			    "txq%02dtxseg%d", i, j);
   8148 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   8149 			    EVCNT_TYPE_MISC,
   8150 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   8151 		}
   8152 
   8153 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   8154 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   8155 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   8156 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   8157 		/* Only for 82544 (and earlier?) */
   8158 		if (sc->sc_type <= WM_T_82544)
   8159 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   8160 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   8161 #endif /* WM_EVENT_COUNTERS */
   8162 
   8163 		tx_done++;
   8164 	}
   8165 	if (error)
   8166 		goto fail_1;
   8167 
   8168 	/* For receive */
   8169 	error = 0;
   8170 	rx_done = 0;
   8171 	for (i = 0; i < sc->sc_nqueues; i++) {
   8172 #ifdef WM_EVENT_COUNTERS
   8173 		const char *xname;
   8174 #endif
   8175 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8176 		rxq->rxq_sc = sc;
   8177 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   8178 
   8179 		error = wm_alloc_rx_descs(sc, rxq);
   8180 		if (error)
   8181 			break;
   8182 
   8183 		error = wm_alloc_rx_buffer(sc, rxq);
   8184 		if (error) {
   8185 			wm_free_rx_descs(sc, rxq);
   8186 			break;
   8187 		}
   8188 
   8189 #ifdef WM_EVENT_COUNTERS
   8190 		xname = device_xname(sc->sc_dev);
   8191 
   8192 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   8193 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   8194 
   8195 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   8196 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   8197 #endif /* WM_EVENT_COUNTERS */
   8198 
   8199 		rx_done++;
   8200 	}
   8201 	if (error)
   8202 		goto fail_2;
   8203 
   8204 	return 0;
   8205 
   8206 fail_2:
   8207 	for (i = 0; i < rx_done; i++) {
   8208 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8209 		wm_free_rx_buffer(sc, rxq);
   8210 		wm_free_rx_descs(sc, rxq);
   8211 		if (rxq->rxq_lock)
   8212 			mutex_obj_free(rxq->rxq_lock);
   8213 	}
   8214 fail_1:
   8215 	for (i = 0; i < tx_done; i++) {
   8216 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8217 		pcq_destroy(txq->txq_interq);
   8218 		wm_free_tx_buffer(sc, txq);
   8219 		wm_free_tx_descs(sc, txq);
   8220 		if (txq->txq_lock)
   8221 			mutex_obj_free(txq->txq_lock);
   8222 	}
   8223 
   8224 	kmem_free(sc->sc_queue,
   8225 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   8226 fail_0:
   8227 	return error;
   8228 }
   8229 
   8230 /*
    8231  * wm_free_txrx_queues:
   8232  *	Free {tx,rx}descs and {tx,rx} buffers
   8233  */
   8234 static void
   8235 wm_free_txrx_queues(struct wm_softc *sc)
   8236 {
   8237 	int i;
   8238 
   8239 	for (i = 0; i < sc->sc_nqueues; i++) {
   8240 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   8241 
   8242 #ifdef WM_EVENT_COUNTERS
   8243 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   8244 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   8245 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   8246 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   8247 #endif /* WM_EVENT_COUNTERS */
   8248 
   8249 		wm_free_rx_buffer(sc, rxq);
   8250 		wm_free_rx_descs(sc, rxq);
   8251 		if (rxq->rxq_lock)
   8252 			mutex_obj_free(rxq->rxq_lock);
   8253 	}
   8254 
   8255 	for (i = 0; i < sc->sc_nqueues; i++) {
   8256 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   8257 		struct mbuf *m;
   8258 #ifdef WM_EVENT_COUNTERS
   8259 		int j;
   8260 
   8261 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   8262 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   8263 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   8264 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   8265 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   8266 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   8267 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   8268 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   8269 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   8270 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   8271 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   8272 
   8273 		for (j = 0; j < WM_NTXSEGS; j++)
   8274 			evcnt_detach(&txq->txq_ev_txseg[j]);
   8275 
   8276 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   8277 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   8278 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   8279 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   8280 		if (sc->sc_type <= WM_T_82544)
   8281 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   8282 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   8283 #endif /* WM_EVENT_COUNTERS */
   8284 
   8285 		/* Drain txq_interq */
   8286 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   8287 			m_freem(m);
   8288 		pcq_destroy(txq->txq_interq);
   8289 
   8290 		wm_free_tx_buffer(sc, txq);
   8291 		wm_free_tx_descs(sc, txq);
   8292 		if (txq->txq_lock)
   8293 			mutex_obj_free(txq->txq_lock);
   8294 	}
   8295 
   8296 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8297 }
   8298 
   8299 static void
   8300 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8301 {
   8302 
   8303 	KASSERT(mutex_owned(txq->txq_lock));
   8304 
   8305 	/* Initialize the transmit descriptor ring. */
   8306 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8307 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8308 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8309 	txq->txq_free = WM_NTXDESC(txq);
   8310 	txq->txq_next = 0;
   8311 }
   8312 
   8313 static void
   8314 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8315     struct wm_txqueue *txq)
   8316 {
   8317 
   8318 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8319 		device_xname(sc->sc_dev), __func__));
   8320 	KASSERT(mutex_owned(txq->txq_lock));
   8321 
   8322 	if (sc->sc_type < WM_T_82543) {
   8323 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8324 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8325 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8326 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8327 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8328 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8329 	} else {
   8330 		int qid = wmq->wmq_id;
   8331 
   8332 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8333 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8334 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8335 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8336 
   8337 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8338 			/*
   8339 			 * Don't write TDT before TCTL.EN is set.
    8340 			 * See the datasheet.
   8341 			 */
   8342 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8343 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8344 			    | TXDCTL_WTHRESH(0));
   8345 		else {
   8346 			/* XXX should update with AIM? */
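			/*
			 * XXX wmq_itr appears to be kept in EITR units
			 * (256 ns); dividing by four presumably converts it
			 * to the 1.024 us granularity of TIDV/TADV.
			 */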
   8347 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8348 			if (sc->sc_type >= WM_T_82540) {
    8349 				/* TADV should be kept the same as TIDV */
   8350 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8351 			}
   8352 
   8353 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8354 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8355 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8356 		}
   8357 	}
   8358 }
   8359 
   8360 static void
   8361 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8362 {
   8363 	int i;
   8364 
   8365 	KASSERT(mutex_owned(txq->txq_lock));
   8366 
   8367 	/* Initialize the transmit job descriptors. */
   8368 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8369 		txq->txq_soft[i].txs_mbuf = NULL;
   8370 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8371 	txq->txq_snext = 0;
   8372 	txq->txq_sdirty = 0;
   8373 }
   8374 
   8375 static void
   8376 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8377     struct wm_txqueue *txq)
   8378 {
   8379 
   8380 	KASSERT(mutex_owned(txq->txq_lock));
   8381 
   8382 	/*
   8383 	 * Set up some register offsets that are different between
   8384 	 * the i82542 and the i82543 and later chips.
   8385 	 */
   8386 	if (sc->sc_type < WM_T_82543)
   8387 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8388 	else
   8389 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8390 
   8391 	wm_init_tx_descs(sc, txq);
   8392 	wm_init_tx_regs(sc, wmq, txq);
   8393 	wm_init_tx_buffer(sc, txq);
   8394 
    8395 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
   8396 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8397 
   8398 	txq->txq_sending = false;
   8399 }
   8400 
   8401 static void
   8402 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8403     struct wm_rxqueue *rxq)
   8404 {
   8405 
   8406 	KASSERT(mutex_owned(rxq->rxq_lock));
   8407 
   8408 	/*
   8409 	 * Initialize the receive descriptor and receive job
   8410 	 * descriptor rings.
   8411 	 */
   8412 	if (sc->sc_type < WM_T_82543) {
   8413 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8414 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8415 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8416 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8417 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8418 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8419 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8420 
   8421 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8422 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8423 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8424 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8425 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8426 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8427 	} else {
   8428 		int qid = wmq->wmq_id;
   8429 
   8430 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8431 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8432 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8433 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8434 
   8435 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8436 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8437 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8438 				    "or higher\n", __func__, MCLBYTES);
   8439 
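			/*
			 * SRRCTL.BSIZEPKT is programmed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * requirement above that MCLBYTES be an exact
			 * multiple of that unit.
			 */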
   8440 			/*
   8441 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   8442 			 * only.
   8443 			 */
   8444 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8445 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8446 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8447 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8448 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8449 			    | RXDCTL_WTHRESH(1));
   8450 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8451 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8452 		} else {
   8453 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8454 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8455 			/* XXX should update with AIM? */
   8456 			CSR_WRITE(sc, WMREG_RDTR,
   8457 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    8458 			/* MUST be the same value as RDTR */
   8459 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8460 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8461 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8462 		}
   8463 	}
   8464 }
   8465 
   8466 static int
   8467 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8468 {
   8469 	struct wm_rxsoft *rxs;
   8470 	int error, i;
   8471 
   8472 	KASSERT(mutex_owned(rxq->rxq_lock));
   8473 
   8474 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8475 		rxs = &rxq->rxq_soft[i];
   8476 		if (rxs->rxs_mbuf == NULL) {
   8477 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8478 				log(LOG_ERR, "%s: unable to allocate or map "
   8479 				    "rx buffer %d, error = %d\n",
   8480 				    device_xname(sc->sc_dev), i, error);
   8481 				/*
   8482 				 * XXX Should attempt to run with fewer receive
   8483 				 * XXX buffers instead of just failing.
   8484 				 */
   8485 				wm_rxdrain(rxq);
   8486 				return ENOMEM;
   8487 			}
   8488 		} else {
   8489 			/*
   8490 			 * For 82575 and 82576, the RX descriptors must be
   8491 			 * initialized after the setting of RCTL.EN in
   8492 			 * wm_set_filter()
   8493 			 */
   8494 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8495 				wm_init_rxdesc(rxq, i);
   8496 		}
   8497 	}
   8498 	rxq->rxq_ptr = 0;
   8499 	rxq->rxq_discard = 0;
   8500 	WM_RXCHAIN_RESET(rxq);
   8501 
   8502 	return 0;
   8503 }
   8504 
   8505 static int
   8506 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8507     struct wm_rxqueue *rxq)
   8508 {
   8509 
   8510 	KASSERT(mutex_owned(rxq->rxq_lock));
   8511 
   8512 	/*
   8513 	 * Set up some register offsets that are different between
   8514 	 * the i82542 and the i82543 and later chips.
   8515 	 */
   8516 	if (sc->sc_type < WM_T_82543)
   8517 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8518 	else
   8519 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8520 
   8521 	wm_init_rx_regs(sc, wmq, rxq);
   8522 	return wm_init_rx_buffer(sc, rxq);
   8523 }
   8524 
   8525 /*
    8526  * wm_init_txrx_queues:
   8527  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8528  */
   8529 static int
   8530 wm_init_txrx_queues(struct wm_softc *sc)
   8531 {
   8532 	int i, error = 0;
   8533 
   8534 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8535 		device_xname(sc->sc_dev), __func__));
   8536 
   8537 	for (i = 0; i < sc->sc_nqueues; i++) {
   8538 		struct wm_queue *wmq = &sc->sc_queue[i];
   8539 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8540 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8541 
    8542 		/*
    8543 		 * TODO
    8544 		 * Currently, we use a constant value instead of AIM.
    8545 		 * Furthermore, the interrupt interval of multiqueue
    8546 		 * operation, which uses polling mode, is lower than the
    8547 		 * default.  More tuning and AIM support are required.
    8548 		 */
   8549 		if (wm_is_using_multiqueue(sc))
   8550 			wmq->wmq_itr = 50;
   8551 		else
   8552 			wmq->wmq_itr = sc->sc_itr_init;
   8553 		wmq->wmq_set_itr = true;
   8554 
   8555 		mutex_enter(txq->txq_lock);
   8556 		wm_init_tx_queue(sc, wmq, txq);
   8557 		mutex_exit(txq->txq_lock);
   8558 
   8559 		mutex_enter(rxq->rxq_lock);
   8560 		error = wm_init_rx_queue(sc, wmq, rxq);
   8561 		mutex_exit(rxq->rxq_lock);
   8562 		if (error)
   8563 			break;
   8564 	}
   8565 
   8566 	return error;
   8567 }
   8568 
   8569 /*
   8570  * wm_tx_offload:
   8571  *
   8572  *	Set up TCP/IP checksumming parameters for the
   8573  *	specified packet.
   8574  */
   8575 static void
   8576 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8577     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8578 {
   8579 	struct mbuf *m0 = txs->txs_mbuf;
   8580 	struct livengood_tcpip_ctxdesc *t;
   8581 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8582 	uint32_t ipcse;
   8583 	struct ether_header *eh;
   8584 	int offset, iphl;
   8585 	uint8_t fields;
   8586 
   8587 	/*
   8588 	 * XXX It would be nice if the mbuf pkthdr had offset
   8589 	 * fields for the protocol headers.
   8590 	 */
   8591 
   8592 	eh = mtod(m0, struct ether_header *);
   8593 	switch (htons(eh->ether_type)) {
   8594 	case ETHERTYPE_IP:
   8595 	case ETHERTYPE_IPV6:
   8596 		offset = ETHER_HDR_LEN;
   8597 		break;
   8598 
   8599 	case ETHERTYPE_VLAN:
   8600 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8601 		break;
   8602 
   8603 	default:
    8604 		/* We don't support this protocol or encapsulation. */
   8605 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8606 		txq->txq_last_hw_ipcs = 0;
   8607 		txq->txq_last_hw_tucs = 0;
   8608 		*fieldsp = 0;
   8609 		*cmdp = 0;
   8610 		return;
   8611 	}
   8612 
   8613 	if ((m0->m_pkthdr.csum_flags &
   8614 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8615 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8616 	} else
   8617 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8618 
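	/* IPCSE is the inclusive offset of the last byte of the IP header. */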
   8619 	ipcse = offset + iphl - 1;
   8620 
   8621 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8622 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8623 	seg = 0;
   8624 	fields = 0;
   8625 
   8626 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8627 		int hlen = offset + iphl;
   8628 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8629 
   8630 		if (__predict_false(m0->m_len <
   8631 				    (hlen + sizeof(struct tcphdr)))) {
   8632 			/*
   8633 			 * TCP/IP headers are not in the first mbuf; we need
   8634 			 * to do this the slow and painful way. Let's just
   8635 			 * hope this doesn't happen very often.
   8636 			 */
   8637 			struct tcphdr th;
   8638 
   8639 			WM_Q_EVCNT_INCR(txq, tsopain);
   8640 
   8641 			m_copydata(m0, hlen, sizeof(th), &th);
   8642 			if (v4) {
   8643 				struct ip ip;
   8644 
   8645 				m_copydata(m0, offset, sizeof(ip), &ip);
   8646 				ip.ip_len = 0;
   8647 				m_copyback(m0,
   8648 				    offset + offsetof(struct ip, ip_len),
   8649 				    sizeof(ip.ip_len), &ip.ip_len);
   8650 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8651 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8652 			} else {
   8653 				struct ip6_hdr ip6;
   8654 
   8655 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8656 				ip6.ip6_plen = 0;
   8657 				m_copyback(m0,
   8658 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8659 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8660 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8661 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8662 			}
   8663 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8664 			    sizeof(th.th_sum), &th.th_sum);
   8665 
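			/*
			 * th_off counts 32-bit words, so this advances hlen
			 * past the TCP header, including any options.
			 */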
   8666 			hlen += th.th_off << 2;
   8667 		} else {
   8668 			/*
   8669 			 * TCP/IP headers are in the first mbuf; we can do
   8670 			 * this the easy way.
   8671 			 */
   8672 			struct tcphdr *th;
   8673 
   8674 			if (v4) {
   8675 				struct ip *ip =
   8676 				    (void *)(mtod(m0, char *) + offset);
   8677 				th = (void *)(mtod(m0, char *) + hlen);
   8678 
   8679 				ip->ip_len = 0;
   8680 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8681 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8682 			} else {
   8683 				struct ip6_hdr *ip6 =
   8684 				    (void *)(mtod(m0, char *) + offset);
   8685 				th = (void *)(mtod(m0, char *) + hlen);
   8686 
   8687 				ip6->ip6_plen = 0;
   8688 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8689 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8690 			}
   8691 			hlen += th->th_off << 2;
   8692 		}
   8693 
   8694 		if (v4) {
   8695 			WM_Q_EVCNT_INCR(txq, tso);
   8696 			cmdlen |= WTX_TCPIP_CMD_IP;
   8697 		} else {
   8698 			WM_Q_EVCNT_INCR(txq, tso6);
   8699 			ipcse = 0;
   8700 		}
   8701 		cmd |= WTX_TCPIP_CMD_TSE;
   8702 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8703 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8704 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8705 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8706 	}
   8707 
   8708 	/*
   8709 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8710 	 * offload feature, if we load the context descriptor, we
   8711 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8712 	 */
   8713 
   8714 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8715 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8716 	    WTX_TCPIP_IPCSE(ipcse);
   8717 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8718 		WM_Q_EVCNT_INCR(txq, ipsum);
   8719 		fields |= WTX_IXSM;
   8720 	}
   8721 
   8722 	offset += iphl;
   8723 
   8724 	if (m0->m_pkthdr.csum_flags &
   8725 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8726 		WM_Q_EVCNT_INCR(txq, tusum);
   8727 		fields |= WTX_TXSM;
   8728 		tucs = WTX_TCPIP_TUCSS(offset) |
   8729 		    WTX_TCPIP_TUCSO(offset +
   8730 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8731 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8732 	} else if ((m0->m_pkthdr.csum_flags &
   8733 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8734 		WM_Q_EVCNT_INCR(txq, tusum6);
   8735 		fields |= WTX_TXSM;
   8736 		tucs = WTX_TCPIP_TUCSS(offset) |
   8737 		    WTX_TCPIP_TUCSO(offset +
   8738 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8739 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8740 	} else {
   8741 		/* Just initialize it to a valid TCP context. */
   8742 		tucs = WTX_TCPIP_TUCSS(offset) |
   8743 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8744 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8745 	}
   8746 
   8747 	*cmdp = cmd;
   8748 	*fieldsp = fields;
   8749 
   8750 	/*
   8751 	 * We don't have to write context descriptor for every packet
   8752 	 * except for 82574. For 82574, we must write context descriptor
   8753 	 * for every packet when we use two descriptor queues.
   8754 	 *
   8755 	 * The 82574L can only remember the *last* context used
    8756 	 * regardless of the queue it was used for.  We cannot reuse
   8757 	 * contexts on this hardware platform and must generate a new
   8758 	 * context every time.  82574L hardware spec, section 7.2.6,
   8759 	 * second note.
   8760 	 */
   8761 	if (sc->sc_nqueues < 2) {
   8762 		/*
    8763 		 * Setting up a new checksum offload context for every
    8764 		 * frame takes a lot of processing time in hardware.  It
    8765 		 * also hurts performance significantly for small frames,
    8766 		 * so avoid it if the driver can reuse a previously
    8767 		 * configured checksum offload context.
    8768 		 * For TSO, in theory we could reuse the same TSO context
    8769 		 * only if the frame is the same type (IP/TCP) and has the
    8770 		 * same MSS.  However, checking whether a frame has the
    8771 		 * same IP/TCP structure is hard, so just ignore that and
    8772 		 * always establish a new TSO context.
   8773 		 */
   8774 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8775 		    == 0) {
   8776 			if (txq->txq_last_hw_cmd == cmd &&
   8777 			    txq->txq_last_hw_fields == fields &&
   8778 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8779 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8780 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8781 				return;
   8782 			}
   8783 		}
   8784 
   8785 		txq->txq_last_hw_cmd = cmd;
   8786 		txq->txq_last_hw_fields = fields;
   8787 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8788 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8789 	}
   8790 
   8791 	/* Fill in the context descriptor. */
   8792 	t = (struct livengood_tcpip_ctxdesc *)
   8793 	    &txq->txq_descs[txq->txq_next];
   8794 	t->tcpip_ipcs = htole32(ipcs);
   8795 	t->tcpip_tucs = htole32(tucs);
   8796 	t->tcpip_cmdlen = htole32(cmdlen);
   8797 	t->tcpip_seg = htole32(seg);
   8798 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8799 
   8800 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8801 	txs->txs_ndesc++;
   8802 }
   8803 
   8804 static inline int
   8805 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8806 {
   8807 	struct wm_softc *sc = ifp->if_softc;
   8808 	u_int cpuid = cpu_index(curcpu());
   8809 
   8810 	/*
    8811 	 * Currently a simple distribution strategy.
    8812 	 * TODO:
    8813 	 * Distribute by flow ID (RSS hash value).
   8814 	 */
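	/*
	 * Example: with ncpu = 8, sc_affinity_offset = 2 and cpuid = 1,
	 * the rotated index is (1 + 8 - 2) % 8 = 7, which is then folded
	 * onto the available queues by % sc_nqueues.  Adding ncpu keeps
	 * the unsigned subtraction from wrapping around.
	 */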
   8815 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8816 }
   8817 
   8818 static inline bool
   8819 wm_linkdown_discard(struct wm_txqueue *txq)
   8820 {
   8821 
   8822 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8823 		return true;
   8824 
   8825 	return false;
   8826 }
   8827 
   8828 /*
   8829  * wm_start:		[ifnet interface function]
   8830  *
   8831  *	Start packet transmission on the interface.
   8832  */
   8833 static void
   8834 wm_start(struct ifnet *ifp)
   8835 {
   8836 	struct wm_softc *sc = ifp->if_softc;
   8837 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8838 
   8839 	KASSERT(if_is_mpsafe(ifp));
   8840 	/*
   8841 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8842 	 */
   8843 
   8844 	mutex_enter(txq->txq_lock);
   8845 	if (!txq->txq_stopping)
   8846 		wm_start_locked(ifp);
   8847 	mutex_exit(txq->txq_lock);
   8848 }
   8849 
   8850 static void
   8851 wm_start_locked(struct ifnet *ifp)
   8852 {
   8853 	struct wm_softc *sc = ifp->if_softc;
   8854 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8855 
   8856 	wm_send_common_locked(ifp, txq, false);
   8857 }
   8858 
   8859 static int
   8860 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8861 {
   8862 	int qid;
   8863 	struct wm_softc *sc = ifp->if_softc;
   8864 	struct wm_txqueue *txq;
   8865 
   8866 	qid = wm_select_txqueue(ifp, m);
   8867 	txq = &sc->sc_queue[qid].wmq_txq;
   8868 
   8869 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8870 		m_freem(m);
   8871 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8872 		return ENOBUFS;
   8873 	}
   8874 
   8875 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8876 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8877 	if (m->m_flags & M_MCAST)
   8878 		if_statinc_ref(nsr, if_omcasts);
   8879 	IF_STAT_PUTREF(ifp);
   8880 
   8881 	if (mutex_tryenter(txq->txq_lock)) {
   8882 		if (!txq->txq_stopping)
   8883 			wm_transmit_locked(ifp, txq);
   8884 		mutex_exit(txq->txq_lock);
   8885 	}
   8886 
   8887 	return 0;
   8888 }
   8889 
   8890 static void
   8891 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8892 {
   8893 
   8894 	wm_send_common_locked(ifp, txq, true);
   8895 }
   8896 
   8897 static void
   8898 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8899     bool is_transmit)
   8900 {
   8901 	struct wm_softc *sc = ifp->if_softc;
   8902 	struct mbuf *m0;
   8903 	struct wm_txsoft *txs;
   8904 	bus_dmamap_t dmamap;
   8905 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8906 	bus_addr_t curaddr;
   8907 	bus_size_t seglen, curlen;
   8908 	uint32_t cksumcmd;
   8909 	uint8_t cksumfields;
   8910 	bool remap = true;
   8911 
   8912 	KASSERT(mutex_owned(txq->txq_lock));
   8913 	KASSERT(!txq->txq_stopping);
   8914 
   8915 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8916 		return;
   8917 
   8918 	if (__predict_false(wm_linkdown_discard(txq))) {
   8919 		do {
   8920 			if (is_transmit)
   8921 				m0 = pcq_get(txq->txq_interq);
   8922 			else
   8923 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8924 			/*
    8925 			 * Increment the successful packet counter, just as
    8926 			 * when a packet is silently discarded by a link-down
    8927 			 * PHY.
   8927 			 */
   8928 			if (m0 != NULL) {
   8929 				if_statinc(ifp, if_opackets);
   8930 				m_freem(m0);
   8931 			}
   8932 		} while (m0 != NULL);
   8933 		return;
   8934 	}
   8935 
   8936 	/* Remember the previous number of free descriptors. */
   8937 	ofree = txq->txq_free;
   8938 
   8939 	/*
   8940 	 * Loop through the send queue, setting up transmit descriptors
   8941 	 * until we drain the queue, or use up all available transmit
   8942 	 * descriptors.
   8943 	 */
   8944 	for (;;) {
   8945 		m0 = NULL;
   8946 
   8947 		/* Get a work queue entry. */
   8948 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8949 			wm_txeof(txq, UINT_MAX);
   8950 			if (txq->txq_sfree == 0) {
   8951 				DPRINTF(sc, WM_DEBUG_TX,
   8952 				    ("%s: TX: no free job descriptors\n",
   8953 					device_xname(sc->sc_dev)));
   8954 				WM_Q_EVCNT_INCR(txq, txsstall);
   8955 				break;
   8956 			}
   8957 		}
   8958 
   8959 		/* Grab a packet off the queue. */
   8960 		if (is_transmit)
   8961 			m0 = pcq_get(txq->txq_interq);
   8962 		else
   8963 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8964 		if (m0 == NULL)
   8965 			break;
   8966 
   8967 		DPRINTF(sc, WM_DEBUG_TX,
   8968 		    ("%s: TX: have packet to transmit: %p\n",
   8969 			device_xname(sc->sc_dev), m0));
   8970 
   8971 		txs = &txq->txq_soft[txq->txq_snext];
   8972 		dmamap = txs->txs_dmamap;
   8973 
   8974 		use_tso = (m0->m_pkthdr.csum_flags &
   8975 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8976 
   8977 		/*
   8978 		 * So says the Linux driver:
   8979 		 * The controller does a simple calculation to make sure
   8980 		 * there is enough room in the FIFO before initiating the
   8981 		 * DMA for each buffer. The calc is:
   8982 		 *	4 = ceil(buffer len / MSS)
   8983 		 * To make sure we don't overrun the FIFO, adjust the max
   8984 		 * buffer len if the MSS drops.
   8985 		 */
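		/*
		 * E.g. with an MSS of 1448, each DMA segment is capped at
		 * 1448 << 2 = 5792 bytes.
		 */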
   8986 		dmamap->dm_maxsegsz =
   8987 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8988 		    ? m0->m_pkthdr.segsz << 2
   8989 		    : WTX_MAX_LEN;
   8990 
   8991 		/*
   8992 		 * Load the DMA map.  If this fails, the packet either
   8993 		 * didn't fit in the allotted number of segments, or we
   8994 		 * were short on resources.  For the too-many-segments
   8995 		 * case, we simply report an error and drop the packet,
   8996 		 * since we can't sanely copy a jumbo packet to a single
   8997 		 * buffer.
   8998 		 */
   8999 retry:
   9000 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9001 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9002 		if (__predict_false(error)) {
   9003 			if (error == EFBIG) {
   9004 				if (remap == true) {
   9005 					struct mbuf *m;
   9006 
   9007 					remap = false;
   9008 					m = m_defrag(m0, M_NOWAIT);
   9009 					if (m != NULL) {
   9010 						WM_Q_EVCNT_INCR(txq, defrag);
   9011 						m0 = m;
   9012 						goto retry;
   9013 					}
   9014 				}
   9015 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9016 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9017 				    "DMA segments, dropping...\n",
   9018 				    device_xname(sc->sc_dev));
   9019 				wm_dump_mbuf_chain(sc, m0);
   9020 				m_freem(m0);
   9021 				continue;
   9022 			}
   9023 			/* Short on resources, just stop for now. */
   9024 			DPRINTF(sc, WM_DEBUG_TX,
   9025 			    ("%s: TX: dmamap load failed: %d\n",
   9026 				device_xname(sc->sc_dev), error));
   9027 			break;
   9028 		}
   9029 
   9030 		segs_needed = dmamap->dm_nsegs;
   9031 		if (use_tso) {
   9032 			/* For sentinel descriptor; see below. */
   9033 			segs_needed++;
   9034 		}
   9035 
   9036 		/*
   9037 		 * Ensure we have enough descriptors free to describe
   9038 		 * the packet. Note, we always reserve one descriptor
   9039 		 * at the end of the ring due to the semantics of the
   9040 		 * TDT register, plus one more in the event we need
   9041 		 * to load offload context.
   9042 		 */
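		/*
		 * Example: a TSO packet mapped to 8 DMA segments has
		 * segs_needed = 9 (one extra for the sentinel) and thus
		 * needs at least 11 free descriptors to pass this check.
		 */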
   9043 		if (segs_needed > txq->txq_free - 2) {
   9044 			/*
   9045 			 * Not enough free descriptors to transmit this
   9046 			 * packet.  We haven't committed anything yet,
   9047 			 * so just unload the DMA map, put the packet
    9048 			 * back on the queue, and punt.  Notify the upper
   9049 			 * layer that there are no more slots left.
   9050 			 */
   9051 			DPRINTF(sc, WM_DEBUG_TX,
   9052 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9053 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9054 				segs_needed, txq->txq_free - 1));
   9055 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9056 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9057 			WM_Q_EVCNT_INCR(txq, txdstall);
   9058 			break;
   9059 		}
   9060 
   9061 		/*
   9062 		 * Check for 82547 Tx FIFO bug. We need to do this
   9063 		 * once we know we can transmit the packet, since we
   9064 		 * do some internal FIFO space accounting here.
   9065 		 */
   9066 		if (sc->sc_type == WM_T_82547 &&
   9067 		    wm_82547_txfifo_bugchk(sc, m0)) {
   9068 			DPRINTF(sc, WM_DEBUG_TX,
   9069 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   9070 				device_xname(sc->sc_dev)));
   9071 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9072 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9073 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   9074 			break;
   9075 		}
   9076 
   9077 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9078 
   9079 		DPRINTF(sc, WM_DEBUG_TX,
   9080 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9081 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9082 
   9083 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9084 
   9085 		/*
   9086 		 * Store a pointer to the packet so that we can free it
   9087 		 * later.
   9088 		 *
   9089 		 * Initially, we consider the number of descriptors the
   9090 		 * packet uses the number of DMA segments.  This may be
   9091 		 * incremented by 1 if we do checksum offload (a descriptor
   9092 		 * is used to set the checksum context).
   9093 		 */
   9094 		txs->txs_mbuf = m0;
   9095 		txs->txs_firstdesc = txq->txq_next;
   9096 		txs->txs_ndesc = segs_needed;
   9097 
   9098 		/* Set up offload parameters for this packet. */
   9099 		if (m0->m_pkthdr.csum_flags &
   9100 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9101 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9102 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9103 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   9104 		} else {
   9105 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   9106 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   9107 			cksumcmd = 0;
   9108 			cksumfields = 0;
   9109 		}
   9110 
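		/*
		 * IFCS has the MAC insert the Ethernet FCS; IDE enables the
		 * transmit interrupt delay for this descriptor.
		 */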
   9111 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   9112 
   9113 		/* Sync the DMA map. */
   9114 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9115 		    BUS_DMASYNC_PREWRITE);
   9116 
   9117 		/* Initialize the transmit descriptor. */
   9118 		for (nexttx = txq->txq_next, seg = 0;
   9119 		     seg < dmamap->dm_nsegs; seg++) {
   9120 			for (seglen = dmamap->dm_segs[seg].ds_len,
   9121 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   9122 			     seglen != 0;
   9123 			     curaddr += curlen, seglen -= curlen,
   9124 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   9125 				curlen = seglen;
   9126 
   9127 				/*
   9128 				 * So says the Linux driver:
   9129 				 * Work around for premature descriptor
   9130 				 * write-backs in TSO mode.  Append a
   9131 				 * 4-byte sentinel descriptor.
   9132 				 */
   9133 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   9134 				    curlen > 8)
   9135 					curlen -= 4;
   9136 
   9137 				wm_set_dma_addr(
   9138 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   9139 				txq->txq_descs[nexttx].wtx_cmdlen
   9140 				    = htole32(cksumcmd | curlen);
   9141 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   9142 				    = 0;
   9143 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   9144 				    = cksumfields;
   9145 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9146 				lasttx = nexttx;
   9147 
   9148 				DPRINTF(sc, WM_DEBUG_TX,
   9149 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   9150 					"len %#04zx\n",
   9151 					device_xname(sc->sc_dev), nexttx,
   9152 					(uint64_t)curaddr, curlen));
   9153 			}
   9154 		}
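
         		/*
         		 * Illustrative example of the TSO sentinel above (a
         		 * sketch, not compiled): if the last DMA segment of a
         		 * TSO packet is 1024 bytes, the inner loop emits it as
         		 * two descriptors so the packet ends with a small
         		 * write-back-safe tail:
         		 *
         		 *	desc n:   1020 bytes (curlen -= 4)
         		 *	desc n+1:    4 bytes (the sentinel)
         		 */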
   9155 
   9156 		KASSERT(lasttx != -1);
   9157 
   9158 		/*
   9159 		 * Set up the command byte on the last descriptor of
   9160 		 * the packet. If we're in the interrupt delay window,
   9161 		 * delay the interrupt.
   9162 		 */
   9163 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9164 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9165 
   9166 		/*
   9167 		 * If VLANs are enabled and the packet has a VLAN tag, set
   9168 		 * up the descriptor to encapsulate the packet for us.
   9169 		 *
   9170 		 * This is only valid on the last descriptor of the packet.
   9171 		 */
   9172 		if (vlan_has_tag(m0)) {
   9173 			txq->txq_descs[lasttx].wtx_cmdlen |=
   9174 			    htole32(WTX_CMD_VLE);
   9175 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   9176 			    = htole16(vlan_get_tag(m0));
   9177 		}
   9178 
   9179 		txs->txs_lastdesc = lasttx;
   9180 
   9181 		DPRINTF(sc, WM_DEBUG_TX,
   9182 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9183 			device_xname(sc->sc_dev),
   9184 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9185 
   9186 		/* Sync the descriptors we're using. */
   9187 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9188 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9189 
   9190 		/* Give the packet to the chip. */
   9191 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9192 
   9193 		DPRINTF(sc, WM_DEBUG_TX,
   9194 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9195 
   9196 		DPRINTF(sc, WM_DEBUG_TX,
   9197 		    ("%s: TX: finished transmitting packet, job %d\n",
   9198 			device_xname(sc->sc_dev), txq->txq_snext));
   9199 
   9200 		/* Advance the tx pointer. */
   9201 		txq->txq_free -= txs->txs_ndesc;
   9202 		txq->txq_next = nexttx;
   9203 
   9204 		txq->txq_sfree--;
   9205 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9206 
   9207 		/* Pass the packet to any BPF listeners. */
   9208 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9209 	}
   9210 
   9211 	if (m0 != NULL) {
   9212 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9213 		WM_Q_EVCNT_INCR(txq, descdrop);
   9214 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9215 			__func__));
   9216 		m_freem(m0);
   9217 	}
   9218 
   9219 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9220 		/* No more slots; notify upper layer. */
   9221 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9222 	}
   9223 
   9224 	if (txq->txq_free != ofree) {
   9225 		/* Set a watchdog timer in case the chip flakes out. */
   9226 		txq->txq_lastsent = time_uptime;
   9227 		txq->txq_sending = true;
   9228 	}
   9229 }
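
         /*
          * Per-packet descriptor layout used by the legacy send path above
          * (an illustrative sketch, not compiled; the optional context
          * descriptor is written by wm_tx_offload()):
          *
          *	[context descriptor]	optional, for checksum/TSO offload
          *	[data descriptor 0]	cksumcmd | seg length (IFCS, IDE set)
          *	    ...
          *	[data descriptor n-1]	additionally WTX_CMD_EOP | WTX_CMD_RS
          *
          * Only the last descriptor requests status write-back (WTX_CMD_RS);
          * that is the descriptor wm_txeof() later polls for WTX_ST_DD.
          */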
   9230 
   9231 /*
   9232  * wm_nq_tx_offload:
   9233  *
   9234  *	Set up TCP/IP checksumming parameters for the
   9235  *	specified packet, for NEWQUEUE devices
   9236  */
   9237 static void
   9238 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   9239     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   9240 {
   9241 	struct mbuf *m0 = txs->txs_mbuf;
   9242 	uint32_t vl_len, mssidx, cmdc;
   9243 	struct ether_header *eh;
   9244 	int offset, iphl;
   9245 
   9246 	/*
   9247 	 * XXX It would be nice if the mbuf pkthdr had offset
   9248 	 * fields for the protocol headers.
   9249 	 */
   9250 	*cmdlenp = 0;
   9251 	*fieldsp = 0;
   9252 
   9253 	eh = mtod(m0, struct ether_header *);
   9254 	switch (htons(eh->ether_type)) {
   9255 	case ETHERTYPE_IP:
   9256 	case ETHERTYPE_IPV6:
   9257 		offset = ETHER_HDR_LEN;
   9258 		break;
   9259 
   9260 	case ETHERTYPE_VLAN:
   9261 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   9262 		break;
   9263 
   9264 	default:
   9265 		/* Don't support this protocol or encapsulation. */
   9266 		*do_csum = false;
   9267 		return;
   9268 	}
   9269 	*do_csum = true;
   9270 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   9271 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   9272 
   9273 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   9274 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   9275 
   9276 	if ((m0->m_pkthdr.csum_flags &
   9277 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   9278 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   9279 	} else {
   9280 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   9281 	}
   9282 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   9283 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   9284 
   9285 	if (vlan_has_tag(m0)) {
   9286 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   9287 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9288 		*cmdlenp |= NQTX_CMD_VLE;
   9289 	}
   9290 
   9291 	mssidx = 0;
   9292 
   9293 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9294 		int hlen = offset + iphl;
   9295 		int tcp_hlen;
   9296 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9297 
   9298 		if (__predict_false(m0->m_len <
   9299 				    (hlen + sizeof(struct tcphdr)))) {
   9300 			/*
   9301 			 * TCP/IP headers are not in the first mbuf; we need
   9302 			 * to do this the slow and painful way. Let's just
   9303 			 * hope this doesn't happen very often.
   9304 			 */
   9305 			struct tcphdr th;
   9306 
   9307 			WM_Q_EVCNT_INCR(txq, tsopain);
   9308 
   9309 			m_copydata(m0, hlen, sizeof(th), &th);
   9310 			if (v4) {
   9311 				struct ip ip;
   9312 
   9313 				m_copydata(m0, offset, sizeof(ip), &ip);
   9314 				ip.ip_len = 0;
   9315 				m_copyback(m0,
   9316 				    offset + offsetof(struct ip, ip_len),
   9317 				    sizeof(ip.ip_len), &ip.ip_len);
   9318 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9319 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9320 			} else {
   9321 				struct ip6_hdr ip6;
   9322 
   9323 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9324 				ip6.ip6_plen = 0;
   9325 				m_copyback(m0,
   9326 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9327 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9328 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9329 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9330 			}
   9331 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9332 			    sizeof(th.th_sum), &th.th_sum);
   9333 
   9334 			tcp_hlen = th.th_off << 2;
   9335 		} else {
   9336 			/*
   9337 			 * TCP/IP headers are in the first mbuf; we can do
   9338 			 * this the easy way.
   9339 			 */
   9340 			struct tcphdr *th;
   9341 
   9342 			if (v4) {
   9343 				struct ip *ip =
   9344 				    (void *)(mtod(m0, char *) + offset);
   9345 				th = (void *)(mtod(m0, char *) + hlen);
   9346 
   9347 				ip->ip_len = 0;
   9348 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9349 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9350 			} else {
   9351 				struct ip6_hdr *ip6 =
   9352 				    (void *)(mtod(m0, char *) + offset);
   9353 				th = (void *)(mtod(m0, char *) + hlen);
   9354 
   9355 				ip6->ip6_plen = 0;
   9356 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9357 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9358 			}
   9359 			tcp_hlen = th->th_off << 2;
   9360 		}
   9361 		hlen += tcp_hlen;
   9362 		*cmdlenp |= NQTX_CMD_TSE;
   9363 
   9364 		if (v4) {
   9365 			WM_Q_EVCNT_INCR(txq, tso);
   9366 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9367 		} else {
   9368 			WM_Q_EVCNT_INCR(txq, tso6);
   9369 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9370 		}
   9371 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9372 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9373 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9374 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9375 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9376 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9377 	} else {
   9378 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9379 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9380 	}
   9381 
   9382 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9383 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9384 		cmdc |= NQTXC_CMD_IP4;
   9385 	}
   9386 
   9387 	if (m0->m_pkthdr.csum_flags &
   9388 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9389 		WM_Q_EVCNT_INCR(txq, tusum);
   9390 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9391 			cmdc |= NQTXC_CMD_TCP;
   9392 		else
   9393 			cmdc |= NQTXC_CMD_UDP;
   9394 
   9395 		cmdc |= NQTXC_CMD_IP4;
   9396 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9397 	}
   9398 	if (m0->m_pkthdr.csum_flags &
   9399 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9400 		WM_Q_EVCNT_INCR(txq, tusum6);
   9401 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9402 			cmdc |= NQTXC_CMD_TCP;
   9403 		else
   9404 			cmdc |= NQTXC_CMD_UDP;
   9405 
   9406 		cmdc |= NQTXC_CMD_IP6;
   9407 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9408 	}
   9409 
    9410 	/*
    9411 	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
    9412 	 * I210 and I211), we don't have to write a context descriptor
    9413 	 * for every packet; writing one per Tx queue is enough for
    9414 	 * these controllers.
    9415 	 * Writing a context descriptor for every packet adds some
    9416 	 * overhead, but it does not cause problems.
    9417 	 */
   9418 	/* Fill in the context descriptor. */
   9419 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9420 	    htole32(vl_len);
   9421 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9422 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9423 	    htole32(cmdc);
   9424 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9425 	    htole32(mssidx);
   9426 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9427 	DPRINTF(sc, WM_DEBUG_TX,
   9428 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9429 		txq->txq_next, 0, vl_len));
   9430 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9431 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9432 	txs->txs_ndesc++;
   9433 }
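
         /*
          * Worked example (illustrative only; the header sizes and MSS are
          * hypothetical): for a TSOv4 packet with a 14-byte Ethernet header,
          * a 20-byte IP header, a 20-byte TCP header and an MSS of 1448, the
          * function above packs the context descriptor roughly as:
          *
          *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
          *	mssidx = (1448 << NQTXC_MSSIDX_MSS_SHIFT)
          *	    | (20 << NQTXC_MSSIDX_L4LEN_SHIFT);
          *	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_TCP
          *	    | NQTXC_CMD_IP4;
          *
          * while *cmdlenp additionally carries NQTX_CMD_TSE and *fieldsp
          * carries IXSM | TUXSM plus the TCP payload length (m_pkthdr.len
          * minus all headers) in its PAYLEN bits.
          */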
   9434 
   9435 /*
   9436  * wm_nq_start:		[ifnet interface function]
   9437  *
   9438  *	Start packet transmission on the interface for NEWQUEUE devices
   9439  */
   9440 static void
   9441 wm_nq_start(struct ifnet *ifp)
   9442 {
   9443 	struct wm_softc *sc = ifp->if_softc;
   9444 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9445 
   9446 	KASSERT(if_is_mpsafe(ifp));
   9447 	/*
   9448 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9449 	 */
   9450 
   9451 	mutex_enter(txq->txq_lock);
   9452 	if (!txq->txq_stopping)
   9453 		wm_nq_start_locked(ifp);
   9454 	mutex_exit(txq->txq_lock);
   9455 }
   9456 
   9457 static void
   9458 wm_nq_start_locked(struct ifnet *ifp)
   9459 {
   9460 	struct wm_softc *sc = ifp->if_softc;
   9461 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9462 
   9463 	wm_nq_send_common_locked(ifp, txq, false);
   9464 }
   9465 
   9466 static int
   9467 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9468 {
   9469 	int qid;
   9470 	struct wm_softc *sc = ifp->if_softc;
   9471 	struct wm_txqueue *txq;
   9472 
   9473 	qid = wm_select_txqueue(ifp, m);
   9474 	txq = &sc->sc_queue[qid].wmq_txq;
   9475 
   9476 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9477 		m_freem(m);
   9478 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9479 		return ENOBUFS;
   9480 	}
   9481 
   9482 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9483 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9484 	if (m->m_flags & M_MCAST)
   9485 		if_statinc_ref(nsr, if_omcasts);
   9486 	IF_STAT_PUTREF(ifp);
   9487 
    9488 	/*
    9489 	 * There are two situations in which this mutex_tryenter() can
    9490 	 * fail at run time:
    9491 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9492 	 *     (2) contention with the deferred if_start softint
    9493 	 *         (wm_handle_queue())
    9494 	 * In case (1), the last packet enqueued to txq->txq_interq is
    9495 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9496 	 * In case (2), it is likewise dequeued by
    9497 	 * wm_deferred_start_locked(), so it does not get stuck either.
    9498 	 */
   9499 	if (mutex_tryenter(txq->txq_lock)) {
   9500 		if (!txq->txq_stopping)
   9501 			wm_nq_transmit_locked(ifp, txq);
   9502 		mutex_exit(txq->txq_lock);
   9503 	}
   9504 
   9505 	return 0;
   9506 }
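
         /*
          * The lock handoff in wm_nq_transmit() can be summarized as follows
          * (an illustrative sketch, not compiled):
          *
          *	pcq_put(txq->txq_interq, m);		// always enqueue first
          *	if (mutex_tryenter(txq->txq_lock)) {	// then try to drain
          *		wm_nq_transmit_locked(ifp, txq);
          *		mutex_exit(txq->txq_lock);
          *	}
          *
          * When the tryenter fails, the current lock holder ends up draining
          * txq_interq via wm_deferred_start_locked(), so the enqueued packet
          * is never stranded.
          */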
   9507 
   9508 static void
   9509 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9510 {
   9511 
   9512 	wm_nq_send_common_locked(ifp, txq, true);
   9513 }
   9514 
   9515 static void
   9516 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9517     bool is_transmit)
   9518 {
   9519 	struct wm_softc *sc = ifp->if_softc;
   9520 	struct mbuf *m0;
   9521 	struct wm_txsoft *txs;
   9522 	bus_dmamap_t dmamap;
   9523 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9524 	bool do_csum, sent;
   9525 	bool remap = true;
   9526 
   9527 	KASSERT(mutex_owned(txq->txq_lock));
   9528 	KASSERT(!txq->txq_stopping);
   9529 
   9530 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9531 		return;
   9532 
   9533 	if (__predict_false(wm_linkdown_discard(txq))) {
   9534 		do {
   9535 			if (is_transmit)
   9536 				m0 = pcq_get(txq->txq_interq);
   9537 			else
   9538 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9539 			/*
    9540 			 * Count the packet as sent even though it is
    9541 			 * discarded because the PHY link is down.
    9542 			 */
   9543 			if (m0 != NULL) {
   9544 				if_statinc(ifp, if_opackets);
   9545 				m_freem(m0);
   9546 			}
   9547 		} while (m0 != NULL);
   9548 		return;
   9549 	}
   9550 
   9551 	sent = false;
   9552 
   9553 	/*
   9554 	 * Loop through the send queue, setting up transmit descriptors
   9555 	 * until we drain the queue, or use up all available transmit
   9556 	 * descriptors.
   9557 	 */
   9558 	for (;;) {
   9559 		m0 = NULL;
   9560 
   9561 		/* Get a work queue entry. */
   9562 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9563 			wm_txeof(txq, UINT_MAX);
   9564 			if (txq->txq_sfree == 0) {
   9565 				DPRINTF(sc, WM_DEBUG_TX,
   9566 				    ("%s: TX: no free job descriptors\n",
   9567 					device_xname(sc->sc_dev)));
   9568 				WM_Q_EVCNT_INCR(txq, txsstall);
   9569 				break;
   9570 			}
   9571 		}
   9572 
   9573 		/* Grab a packet off the queue. */
   9574 		if (is_transmit)
   9575 			m0 = pcq_get(txq->txq_interq);
   9576 		else
   9577 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9578 		if (m0 == NULL)
   9579 			break;
   9580 
   9581 		DPRINTF(sc, WM_DEBUG_TX,
   9582 		    ("%s: TX: have packet to transmit: %p\n",
   9583 			device_xname(sc->sc_dev), m0));
   9584 
   9585 		txs = &txq->txq_soft[txq->txq_snext];
   9586 		dmamap = txs->txs_dmamap;
   9587 
   9588 		/*
   9589 		 * Load the DMA map.  If this fails, the packet either
   9590 		 * didn't fit in the allotted number of segments, or we
   9591 		 * were short on resources.  For the too-many-segments
   9592 		 * case, we simply report an error and drop the packet,
   9593 		 * since we can't sanely copy a jumbo packet to a single
   9594 		 * buffer.
   9595 		 */
   9596 retry:
   9597 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9598 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9599 		if (__predict_false(error)) {
   9600 			if (error == EFBIG) {
   9601 				if (remap == true) {
   9602 					struct mbuf *m;
   9603 
   9604 					remap = false;
   9605 					m = m_defrag(m0, M_NOWAIT);
   9606 					if (m != NULL) {
   9607 						WM_Q_EVCNT_INCR(txq, defrag);
   9608 						m0 = m;
   9609 						goto retry;
   9610 					}
   9611 				}
   9612 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9613 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9614 				    "DMA segments, dropping...\n",
   9615 				    device_xname(sc->sc_dev));
   9616 				wm_dump_mbuf_chain(sc, m0);
   9617 				m_freem(m0);
   9618 				continue;
   9619 			}
   9620 			/* Short on resources, just stop for now. */
   9621 			DPRINTF(sc, WM_DEBUG_TX,
   9622 			    ("%s: TX: dmamap load failed: %d\n",
   9623 				device_xname(sc->sc_dev), error));
   9624 			break;
   9625 		}
   9626 
   9627 		segs_needed = dmamap->dm_nsegs;
   9628 
   9629 		/*
   9630 		 * Ensure we have enough descriptors free to describe
   9631 		 * the packet. Note, we always reserve one descriptor
   9632 		 * at the end of the ring due to the semantics of the
   9633 		 * TDT register, plus one more in the event we need
   9634 		 * to load offload context.
   9635 		 */
   9636 		if (segs_needed > txq->txq_free - 2) {
   9637 			/*
   9638 			 * Not enough free descriptors to transmit this
   9639 			 * packet.  We haven't committed anything yet,
   9640 			 * so just unload the DMA map, put the packet
    9641 			 * back on the queue, and punt. Notify the upper
   9642 			 * layer that there are no more slots left.
   9643 			 */
   9644 			DPRINTF(sc, WM_DEBUG_TX,
   9645 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9646 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9647 				segs_needed, txq->txq_free - 1));
   9648 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9649 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9650 			WM_Q_EVCNT_INCR(txq, txdstall);
   9651 			break;
   9652 		}
   9653 
   9654 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9655 
   9656 		DPRINTF(sc, WM_DEBUG_TX,
   9657 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9658 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9659 
   9660 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9661 
   9662 		/*
   9663 		 * Store a pointer to the packet so that we can free it
   9664 		 * later.
   9665 		 *
   9666 		 * Initially, we consider the number of descriptors the
   9667 		 * packet uses the number of DMA segments.  This may be
   9668 		 * incremented by 1 if we do checksum offload (a descriptor
   9669 		 * is used to set the checksum context).
   9670 		 */
   9671 		txs->txs_mbuf = m0;
   9672 		txs->txs_firstdesc = txq->txq_next;
   9673 		txs->txs_ndesc = segs_needed;
   9674 
   9675 		/* Set up offload parameters for this packet. */
   9676 		uint32_t cmdlen, fields, dcmdlen;
   9677 		if (m0->m_pkthdr.csum_flags &
   9678 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9679 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9680 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9681 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9682 			    &do_csum);
   9683 		} else {
   9684 			do_csum = false;
   9685 			cmdlen = 0;
   9686 			fields = 0;
   9687 		}
   9688 
   9689 		/* Sync the DMA map. */
   9690 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9691 		    BUS_DMASYNC_PREWRITE);
   9692 
   9693 		/* Initialize the first transmit descriptor. */
   9694 		nexttx = txq->txq_next;
   9695 		if (!do_csum) {
   9696 			/* Set up a legacy descriptor */
   9697 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9698 			    dmamap->dm_segs[0].ds_addr);
   9699 			txq->txq_descs[nexttx].wtx_cmdlen =
   9700 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9701 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9702 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9703 			if (vlan_has_tag(m0)) {
   9704 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9705 				    htole32(WTX_CMD_VLE);
   9706 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9707 				    htole16(vlan_get_tag(m0));
   9708 			} else
   9709 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9710 
   9711 			dcmdlen = 0;
   9712 		} else {
   9713 			/* Set up an advanced data descriptor */
   9714 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9715 			    htole64(dmamap->dm_segs[0].ds_addr);
   9716 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9717 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9718 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9719 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9720 			    htole32(fields);
   9721 			DPRINTF(sc, WM_DEBUG_TX,
   9722 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9723 				device_xname(sc->sc_dev), nexttx,
   9724 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9725 			DPRINTF(sc, WM_DEBUG_TX,
   9726 			    ("\t 0x%08x%08x\n", fields,
   9727 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9728 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9729 		}
   9730 
   9731 		lasttx = nexttx;
   9732 		nexttx = WM_NEXTTX(txq, nexttx);
   9733 		/*
   9734 		 * Fill in the next descriptors. Legacy or advanced format
   9735 		 * is the same here.
   9736 		 */
   9737 		for (seg = 1; seg < dmamap->dm_nsegs;
   9738 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9739 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9740 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9741 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9742 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9743 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9744 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9745 			lasttx = nexttx;
   9746 
   9747 			DPRINTF(sc, WM_DEBUG_TX,
   9748 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9749 				device_xname(sc->sc_dev), nexttx,
   9750 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9751 				dmamap->dm_segs[seg].ds_len));
   9752 		}
   9753 
   9754 		KASSERT(lasttx != -1);
   9755 
   9756 		/*
   9757 		 * Set up the command byte on the last descriptor of
   9758 		 * the packet. If we're in the interrupt delay window,
   9759 		 * delay the interrupt.
   9760 		 */
   9761 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9762 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9763 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9764 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9765 
   9766 		txs->txs_lastdesc = lasttx;
   9767 
   9768 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9769 		    device_xname(sc->sc_dev),
   9770 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9771 
   9772 		/* Sync the descriptors we're using. */
   9773 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9774 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9775 
   9776 		/* Give the packet to the chip. */
   9777 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9778 		sent = true;
   9779 
   9780 		DPRINTF(sc, WM_DEBUG_TX,
   9781 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9782 
   9783 		DPRINTF(sc, WM_DEBUG_TX,
   9784 		    ("%s: TX: finished transmitting packet, job %d\n",
   9785 			device_xname(sc->sc_dev), txq->txq_snext));
   9786 
   9787 		/* Advance the tx pointer. */
   9788 		txq->txq_free -= txs->txs_ndesc;
   9789 		txq->txq_next = nexttx;
   9790 
   9791 		txq->txq_sfree--;
   9792 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9793 
   9794 		/* Pass the packet to any BPF listeners. */
   9795 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9796 	}
   9797 
   9798 	if (m0 != NULL) {
   9799 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9800 		WM_Q_EVCNT_INCR(txq, descdrop);
   9801 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9802 			__func__));
   9803 		m_freem(m0);
   9804 	}
   9805 
   9806 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9807 		/* No more slots; notify upper layer. */
   9808 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9809 	}
   9810 
   9811 	if (sent) {
   9812 		/* Set a watchdog timer in case the chip flakes out. */
   9813 		txq->txq_lastsent = time_uptime;
   9814 		txq->txq_sending = true;
   9815 	}
   9816 }
   9817 
   9818 static void
   9819 wm_deferred_start_locked(struct wm_txqueue *txq)
   9820 {
   9821 	struct wm_softc *sc = txq->txq_sc;
   9822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9823 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9824 	int qid = wmq->wmq_id;
   9825 
   9826 	KASSERT(mutex_owned(txq->txq_lock));
   9827 	KASSERT(!txq->txq_stopping);
   9828 
   9829 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   9830 		/* XXX need for ALTQ or one CPU system */
   9831 		if (qid == 0)
   9832 			wm_nq_start_locked(ifp);
   9833 		wm_nq_transmit_locked(ifp, txq);
   9834 	} else {
   9835 		/* XXX need for ALTQ or one CPU system */
   9836 		if (qid == 0)
   9837 			wm_start_locked(ifp);
   9838 		wm_transmit_locked(ifp, txq);
   9839 	}
   9840 }
   9841 
   9842 /* Interrupt */
   9843 
   9844 /*
   9845  * wm_txeof:
   9846  *
   9847  *	Helper; handle transmit interrupts.
   9848  */
   9849 static bool
   9850 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9851 {
   9852 	struct wm_softc *sc = txq->txq_sc;
   9853 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9854 	struct wm_txsoft *txs;
   9855 	int count = 0;
   9856 	int i;
   9857 	uint8_t status;
   9858 	bool more = false;
   9859 
   9860 	KASSERT(mutex_owned(txq->txq_lock));
   9861 
   9862 	if (txq->txq_stopping)
   9863 		return false;
   9864 
   9865 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9866 
   9867 	/*
   9868 	 * Go through the Tx list and free mbufs for those
   9869 	 * frames which have been transmitted.
   9870 	 */
   9871 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9872 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9873 		txs = &txq->txq_soft[i];
   9874 
   9875 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9876 			device_xname(sc->sc_dev), i));
   9877 
   9878 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9879 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9880 
   9881 		status =
   9882 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9883 		if ((status & WTX_ST_DD) == 0) {
   9884 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9885 			    BUS_DMASYNC_PREREAD);
   9886 			break;
   9887 		}
   9888 
   9889 		if (limit-- == 0) {
   9890 			more = true;
   9891 			DPRINTF(sc, WM_DEBUG_TX,
   9892 			    ("%s: TX: loop limited, job %d is not processed\n",
   9893 				device_xname(sc->sc_dev), i));
   9894 			break;
   9895 		}
   9896 
   9897 		count++;
   9898 		DPRINTF(sc, WM_DEBUG_TX,
   9899 		    ("%s: TX: job %d done: descs %d..%d\n",
   9900 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9901 		    txs->txs_lastdesc));
   9902 
   9903 #ifdef WM_EVENT_COUNTERS
   9904 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9905 			WM_Q_EVCNT_INCR(txq, underrun);
   9906 #endif /* WM_EVENT_COUNTERS */
   9907 
    9908 		/*
    9909 		 * Documentation for 82574 and newer says the status field
    9910 		 * has neither an EC (Excessive Collision) bit nor an LC
    9911 		 * (Late Collision) bit (both reserved). Refer to the "PCIe
    9912 		 * GbE Controller Open Source Software Developer's Manual",
    9913 		 * the 82574 datasheet, and newer.
    9914 		 *
    9915 		 * XXX The LC bit was observed set on I218 even on full-duplex
    9916 		 * media, so it might have some other meaning (undocumented).
    9917 		 */
   9918 
   9919 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9920 		    && ((sc->sc_type < WM_T_82574)
   9921 			|| (sc->sc_type == WM_T_80003))) {
   9922 			if_statinc(ifp, if_oerrors);
   9923 			if (status & WTX_ST_LC)
   9924 				log(LOG_WARNING, "%s: late collision\n",
   9925 				    device_xname(sc->sc_dev));
   9926 			else if (status & WTX_ST_EC) {
   9927 				if_statadd(ifp, if_collisions,
   9928 				    TX_COLLISION_THRESHOLD + 1);
   9929 				log(LOG_WARNING, "%s: excessive collisions\n",
   9930 				    device_xname(sc->sc_dev));
   9931 			}
   9932 		} else
   9933 			if_statinc(ifp, if_opackets);
   9934 
   9935 		txq->txq_packets++;
   9936 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9937 
   9938 		txq->txq_free += txs->txs_ndesc;
   9939 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9940 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9941 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9942 		m_freem(txs->txs_mbuf);
   9943 		txs->txs_mbuf = NULL;
   9944 	}
   9945 
   9946 	/* Update the dirty transmit buffer pointer. */
   9947 	txq->txq_sdirty = i;
   9948 	DPRINTF(sc, WM_DEBUG_TX,
   9949 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9950 
   9951 	if (count != 0)
   9952 		rnd_add_uint32(&sc->rnd_source, count);
   9953 
   9954 	/*
   9955 	 * If there are no more pending transmissions, cancel the watchdog
   9956 	 * timer.
   9957 	 */
   9958 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9959 		txq->txq_sending = false;
   9960 
   9961 	return more;
   9962 }
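
         /*
          * Completion protocol used by wm_txeof() above (illustrative only):
          * every packet was queued with WTX_CMD_RS set on its last
          * descriptor, so the chip sets WTX_ST_DD in that descriptor's
          * status field once the whole packet has been fetched. Reaping a
          * job therefore reduces to a single status test:
          *
          *	status = txq->txq_descs[txs->txs_lastdesc]
          *	    .wtx_fields.wtxu_status;
          *	if ((status & WTX_ST_DD) == 0)
          *		break;		// chip hasn't finished this job yet
          */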
   9963 
   9964 static inline uint32_t
   9965 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9966 {
   9967 	struct wm_softc *sc = rxq->rxq_sc;
   9968 
   9969 	if (sc->sc_type == WM_T_82574)
   9970 		return EXTRXC_STATUS(
   9971 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9972 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9973 		return NQRXC_STATUS(
   9974 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9975 	else
   9976 		return rxq->rxq_descs[idx].wrx_status;
   9977 }
   9978 
   9979 static inline uint32_t
   9980 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9981 {
   9982 	struct wm_softc *sc = rxq->rxq_sc;
   9983 
   9984 	if (sc->sc_type == WM_T_82574)
   9985 		return EXTRXC_ERROR(
   9986 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9987 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9988 		return NQRXC_ERROR(
   9989 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9990 	else
   9991 		return rxq->rxq_descs[idx].wrx_errors;
   9992 }
   9993 
   9994 static inline uint16_t
   9995 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9996 {
   9997 	struct wm_softc *sc = rxq->rxq_sc;
   9998 
   9999 	if (sc->sc_type == WM_T_82574)
   10000 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   10001 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10002 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   10003 	else
   10004 		return rxq->rxq_descs[idx].wrx_special;
   10005 }
   10006 
   10007 static inline int
   10008 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   10009 {
   10010 	struct wm_softc *sc = rxq->rxq_sc;
   10011 
   10012 	if (sc->sc_type == WM_T_82574)
   10013 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   10014 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10015 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   10016 	else
   10017 		return rxq->rxq_descs[idx].wrx_len;
   10018 }
   10019 
   10020 #ifdef WM_DEBUG
   10021 static inline uint32_t
   10022 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   10023 {
   10024 	struct wm_softc *sc = rxq->rxq_sc;
   10025 
   10026 	if (sc->sc_type == WM_T_82574)
   10027 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   10028 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10029 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   10030 	else
   10031 		return 0;
   10032 }
   10033 
   10034 static inline uint8_t
   10035 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   10036 {
   10037 	struct wm_softc *sc = rxq->rxq_sc;
   10038 
   10039 	if (sc->sc_type == WM_T_82574)
   10040 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   10041 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10042 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   10043 	else
   10044 		return 0;
   10045 }
   10046 #endif /* WM_DEBUG */
   10047 
   10048 static inline bool
   10049 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   10050     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10051 {
   10052 
   10053 	if (sc->sc_type == WM_T_82574)
   10054 		return (status & ext_bit) != 0;
   10055 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10056 		return (status & nq_bit) != 0;
   10057 	else
   10058 		return (status & legacy_bit) != 0;
   10059 }
   10060 
   10061 static inline bool
   10062 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   10063     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   10064 {
   10065 
   10066 	if (sc->sc_type == WM_T_82574)
   10067 		return (error & ext_bit) != 0;
   10068 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   10069 		return (error & nq_bit) != 0;
   10070 	else
   10071 		return (error & legacy_bit) != 0;
   10072 }
   10073 
   10074 static inline bool
   10075 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   10076 {
   10077 
   10078 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10079 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   10080 		return true;
   10081 	else
   10082 		return false;
   10083 }
   10084 
   10085 static inline bool
   10086 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   10087 {
   10088 	struct wm_softc *sc = rxq->rxq_sc;
   10089 
   10090 	/* XXX missing error bit for newqueue? */
   10091 	if (wm_rxdesc_is_set_error(sc, errors,
   10092 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   10093 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   10094 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   10095 		NQRXC_ERROR_RXE)) {
   10096 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   10097 		    EXTRXC_ERROR_SE, 0))
   10098 			log(LOG_WARNING, "%s: symbol error\n",
   10099 			    device_xname(sc->sc_dev));
   10100 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   10101 		    EXTRXC_ERROR_SEQ, 0))
   10102 			log(LOG_WARNING, "%s: receive sequence error\n",
   10103 			    device_xname(sc->sc_dev));
   10104 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   10105 		    EXTRXC_ERROR_CE, 0))
   10106 			log(LOG_WARNING, "%s: CRC error\n",
   10107 			    device_xname(sc->sc_dev));
   10108 		return true;
   10109 	}
   10110 
   10111 	return false;
   10112 }
   10113 
   10114 static inline bool
   10115 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   10116 {
   10117 	struct wm_softc *sc = rxq->rxq_sc;
   10118 
   10119 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   10120 		NQRXC_STATUS_DD)) {
   10121 		/* We have processed all of the receive descriptors. */
   10122 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   10123 		return false;
   10124 	}
   10125 
   10126 	return true;
   10127 }
   10128 
   10129 static inline bool
   10130 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   10131     uint16_t vlantag, struct mbuf *m)
   10132 {
   10133 
   10134 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   10135 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   10136 		vlan_set_tag(m, le16toh(vlantag));
   10137 	}
   10138 
   10139 	return true;
   10140 }
   10141 
   10142 static inline void
   10143 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   10144     uint32_t errors, struct mbuf *m)
   10145 {
   10146 	struct wm_softc *sc = rxq->rxq_sc;
   10147 
   10148 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   10149 		if (wm_rxdesc_is_set_status(sc, status,
   10150 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   10151 			WM_Q_EVCNT_INCR(rxq, ipsum);
   10152 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   10153 			if (wm_rxdesc_is_set_error(sc, errors,
   10154 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   10155 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   10156 		}
   10157 		if (wm_rxdesc_is_set_status(sc, status,
   10158 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   10159 			/*
   10160 			 * Note: we don't know if this was TCP or UDP,
   10161 			 * so we just set both bits, and expect the
   10162 			 * upper layers to deal.
   10163 			 */
   10164 			WM_Q_EVCNT_INCR(rxq, tusum);
   10165 			m->m_pkthdr.csum_flags |=
   10166 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   10167 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   10168 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   10169 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   10170 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   10171 		}
   10172 	}
   10173 }
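
          /*
           * A hypothetical consumer of the csum_flags set above
           * (illustrative only): upper layers test the *_BAD companion bit
           * instead of re-verifying the checksum in software, e.g.
           *
           *	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
           *		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
           *			;	// drop: hardware saw a bad IP checksum
           *		else
           *			;	// accept: verified in hardware
           *	}
           */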
   10174 
   10175 /*
   10176  * wm_rxeof:
   10177  *
   10178  *	Helper; handle receive interrupts.
   10179  */
   10180 static bool
   10181 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   10182 {
   10183 	struct wm_softc *sc = rxq->rxq_sc;
   10184 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10185 	struct wm_rxsoft *rxs;
   10186 	struct mbuf *m;
   10187 	int i, len;
   10188 	int count = 0;
   10189 	uint32_t status, errors;
   10190 	uint16_t vlantag;
   10191 	bool more = false;
   10192 
   10193 	KASSERT(mutex_owned(rxq->rxq_lock));
   10194 
   10195 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   10196 		rxs = &rxq->rxq_soft[i];
   10197 
   10198 		DPRINTF(sc, WM_DEBUG_RX,
   10199 		    ("%s: RX: checking descriptor %d\n",
   10200 			device_xname(sc->sc_dev), i));
   10201 		wm_cdrxsync(rxq, i,
   10202 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   10203 
   10204 		status = wm_rxdesc_get_status(rxq, i);
   10205 		errors = wm_rxdesc_get_errors(rxq, i);
   10206 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   10207 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   10208 #ifdef WM_DEBUG
   10209 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   10210 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   10211 #endif
   10212 
   10213 		if (!wm_rxdesc_dd(rxq, i, status))
   10214 			break;
   10215 
   10216 		if (limit-- == 0) {
   10217 			more = true;
   10218 			DPRINTF(sc, WM_DEBUG_RX,
   10219 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   10220 				device_xname(sc->sc_dev), i));
   10221 			break;
   10222 		}
   10223 
   10224 		count++;
   10225 		if (__predict_false(rxq->rxq_discard)) {
   10226 			DPRINTF(sc, WM_DEBUG_RX,
   10227 			    ("%s: RX: discarding contents of descriptor %d\n",
   10228 				device_xname(sc->sc_dev), i));
   10229 			wm_init_rxdesc(rxq, i);
   10230 			if (wm_rxdesc_is_eop(rxq, status)) {
   10231 				/* Reset our state. */
   10232 				DPRINTF(sc, WM_DEBUG_RX,
   10233 				    ("%s: RX: resetting rxdiscard -> 0\n",
   10234 					device_xname(sc->sc_dev)));
   10235 				rxq->rxq_discard = 0;
   10236 			}
   10237 			continue;
   10238 		}
   10239 
   10240 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10241 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   10242 
   10243 		m = rxs->rxs_mbuf;
   10244 
   10245 		/*
   10246 		 * Add a new receive buffer to the ring, unless of
   10247 		 * course the length is zero. Treat the latter as a
   10248 		 * failed mapping.
   10249 		 */
   10250 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   10251 			/*
   10252 			 * Failed, throw away what we've done so
   10253 			 * far, and discard the rest of the packet.
   10254 			 */
   10255 			if_statinc(ifp, if_ierrors);
   10256 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   10257 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   10258 			wm_init_rxdesc(rxq, i);
   10259 			if (!wm_rxdesc_is_eop(rxq, status))
   10260 				rxq->rxq_discard = 1;
   10261 			if (rxq->rxq_head != NULL)
   10262 				m_freem(rxq->rxq_head);
   10263 			WM_RXCHAIN_RESET(rxq);
   10264 			DPRINTF(sc, WM_DEBUG_RX,
   10265 			    ("%s: RX: Rx buffer allocation failed, "
   10266 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   10267 				rxq->rxq_discard ? " (discard)" : ""));
   10268 			continue;
   10269 		}
   10270 
   10271 		m->m_len = len;
   10272 		rxq->rxq_len += len;
   10273 		DPRINTF(sc, WM_DEBUG_RX,
   10274 		    ("%s: RX: buffer at %p len %d\n",
   10275 			device_xname(sc->sc_dev), m->m_data, len));
   10276 
   10277 		/* If this is not the end of the packet, keep looking. */
   10278 		if (!wm_rxdesc_is_eop(rxq, status)) {
   10279 			WM_RXCHAIN_LINK(rxq, m);
   10280 			DPRINTF(sc, WM_DEBUG_RX,
   10281 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   10282 				device_xname(sc->sc_dev), rxq->rxq_len));
   10283 			continue;
   10284 		}
   10285 
    10286 		/*
    10287 		 * Okay, we have the entire packet now. The chip is
    10288 		 * configured to include the FCS except on I35[04] and
    10289 		 * I21[01] (not all chips can be configured to strip it),
    10290 		 * so we need to trim it. Those chips have an erratum:
    10291 		 * the RCTL_SECRC bit in RCTL is always set, so we don't
    10292 		 * trim the FCS on them. PCH2 and newer chips also don't
    10293 		 * include the FCS with jumbo frames, to work around an
    10294 		 * erratum. We may need to adjust the length of the previous
    10295 		 * mbuf in the chain if the current mbuf is too short.
    10296 		 */
   10297 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10298 			if (m->m_len < ETHER_CRC_LEN) {
   10299 				rxq->rxq_tail->m_len
   10300 				    -= (ETHER_CRC_LEN - m->m_len);
   10301 				m->m_len = 0;
   10302 			} else
   10303 				m->m_len -= ETHER_CRC_LEN;
   10304 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10305 		} else
   10306 			len = rxq->rxq_len;
   10307 
   10308 		WM_RXCHAIN_LINK(rxq, m);
   10309 
   10310 		*rxq->rxq_tailp = NULL;
   10311 		m = rxq->rxq_head;
   10312 
   10313 		WM_RXCHAIN_RESET(rxq);
   10314 
   10315 		DPRINTF(sc, WM_DEBUG_RX,
   10316 		    ("%s: RX: have entire packet, len -> %d\n",
   10317 			device_xname(sc->sc_dev), len));
   10318 
   10319 		/* If an error occurred, update stats and drop the packet. */
   10320 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10321 			m_freem(m);
   10322 			continue;
   10323 		}
   10324 
   10325 		/* No errors.  Receive the packet. */
   10326 		m_set_rcvif(m, ifp);
   10327 		m->m_pkthdr.len = len;
    10328 		/*
    10329 		 * TODO
    10330 		 * We should save the rsshash and rsstype in this mbuf.
    10331 		 */
   10332 		DPRINTF(sc, WM_DEBUG_RX,
   10333 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10334 			device_xname(sc->sc_dev), rsstype, rsshash));
   10335 
   10336 		/*
   10337 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10338 		 * for us.  Associate the tag with the packet.
   10339 		 */
   10340 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10341 			continue;
   10342 
   10343 		/* Set up checksum info for this packet. */
   10344 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10345 
   10346 		rxq->rxq_packets++;
   10347 		rxq->rxq_bytes += len;
   10348 		/* Pass it on. */
   10349 		if_percpuq_enqueue(sc->sc_ipq, m);
   10350 
   10351 		if (rxq->rxq_stopping)
   10352 			break;
   10353 	}
   10354 	rxq->rxq_ptr = i;
   10355 
   10356 	if (count != 0)
   10357 		rnd_add_uint32(&sc->rnd_source, count);
   10358 
   10359 	DPRINTF(sc, WM_DEBUG_RX,
   10360 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10361 
   10362 	return more;
   10363 }
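
          /*
           * Multi-descriptor receive in wm_rxeof() above (illustrative
           * only; the buffer sizes are hypothetical): a frame larger than
           * one Rx buffer spans several descriptors and only the last one
           * has the EOP status bit set. The fragments are linked with
           * WM_RXCHAIN_LINK() and handed to the stack only at EOP:
           *
           *	desc 0: 2048 bytes, !EOP  -> link mbuf, keep scanning
           *	desc 1: 2048 bytes, !EOP  -> link mbuf, keep scanning
           *	desc 2:  905 bytes,  EOP  -> deliver 5001-byte chain (FCS
           *				     trimmed first if not stripped)
           */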
   10364 
   10365 /*
   10366  * wm_linkintr_gmii:
   10367  *
   10368  *	Helper; handle link interrupts for GMII.
   10369  */
   10370 static void
   10371 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10372 {
   10373 	device_t dev = sc->sc_dev;
   10374 	uint32_t status, reg;
   10375 	bool link;
   10376 	bool dopoll = true;
   10377 	int rv;
   10378 
   10379 	KASSERT(mutex_owned(sc->sc_core_lock));
   10380 
   10381 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10382 		__func__));
   10383 
   10384 	if ((icr & ICR_LSC) == 0) {
   10385 		if (icr & ICR_RXSEQ)
   10386 			DPRINTF(sc, WM_DEBUG_LINK,
   10387 			    ("%s: LINK Receive sequence error\n",
   10388 				device_xname(dev)));
   10389 		return;
   10390 	}
   10391 
   10392 	/* Link status changed */
   10393 	status = CSR_READ(sc, WMREG_STATUS);
   10394 	link = status & STATUS_LU;
   10395 	if (link) {
   10396 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10397 			device_xname(dev),
   10398 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10399 		if (wm_phy_need_linkdown_discard(sc)) {
   10400 			DPRINTF(sc, WM_DEBUG_LINK,
   10401 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10402 				device_xname(dev)));
   10403 			wm_clear_linkdown_discard(sc);
   10404 		}
   10405 	} else {
   10406 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10407 			device_xname(dev)));
   10408 		if (wm_phy_need_linkdown_discard(sc)) {
   10409 			DPRINTF(sc, WM_DEBUG_LINK,
   10410 			    ("%s: linkintr: Set linkdown discard flag\n",
   10411 				device_xname(dev)));
   10412 			wm_set_linkdown_discard(sc);
   10413 		}
   10414 	}
   10415 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10416 		wm_gig_downshift_workaround_ich8lan(sc);
   10417 
   10418 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10419 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10420 
   10421 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10422 		device_xname(dev)));
   10423 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   10424 		if (link) {
    10425 			/*
    10426 			 * To work around the problem, we have to wait
    10427 			 * several hundred milliseconds. The time depends
    10428 			 * on the environment. Wait 1 second to be safe.
    10429 			 */
   10430 			dopoll = false;
   10431 			getmicrotime(&sc->sc_linkup_delay_time);
   10432 			sc->sc_linkup_delay_time.tv_sec += 1;
   10433 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
    10434 			/*
    10435 			 * Simplify by checking tv_sec only; it's enough.
    10436 			 *
    10437 			 * Clearing the time is not strictly required;
    10438 			 * it just records that the timer has stopped
    10439 			 * (for debugging).
    10440 			 */
   10441 
   10442 			sc->sc_linkup_delay_time.tv_sec = 0;
   10443 			sc->sc_linkup_delay_time.tv_usec = 0;
   10444 		}
   10445 	}
   10446 
    10447 	/*
    10448 	 * Call mii_pollstat().
    10449 	 *
    10450 	 * Some (not all) systems using I35[04] or I21[01] don't send packets
    10451 	 * for a short time after link-up: the MAC hands a packet to the PHY
    10452 	 * and no error is observed, yet gratuitous ARP and/or IPv6 DAD
    10453 	 * packets are silently dropped. To avoid this problem, don't call
    10454 	 * mii_pollstat() here, which would send a LINK_STATE_UP notification
    10455 	 * to the upper layer. Instead, mii_pollstat() will be called from
    10456 	 * wm_gmii_mediastatus(), or mii_tick() will be called from wm_tick().
    10457 	 */
   10458 	if (dopoll)
   10459 		mii_pollstat(&sc->sc_mii);
   10460 
   10461 	/* Do some workarounds soon after link status is changed. */
   10462 
   10463 	if (sc->sc_type == WM_T_82543) {
   10464 		int miistatus, active;
   10465 
   10466 		/*
   10467 		 * With 82543, we need to force speed and
   10468 		 * duplex on the MAC equal to what the PHY
   10469 		 * speed and duplex configuration is.
   10470 		 */
   10471 		miistatus = sc->sc_mii.mii_media_status;
   10472 
   10473 		if (miistatus & IFM_ACTIVE) {
   10474 			active = sc->sc_mii.mii_media_active;
   10475 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10476 			switch (IFM_SUBTYPE(active)) {
   10477 			case IFM_10_T:
   10478 				sc->sc_ctrl |= CTRL_SPEED_10;
   10479 				break;
   10480 			case IFM_100_TX:
   10481 				sc->sc_ctrl |= CTRL_SPEED_100;
   10482 				break;
   10483 			case IFM_1000_T:
   10484 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10485 				break;
   10486 			default:
   10487 				/*
   10488 				 * Fiber?
    10489 				 * Should not enter here.
   10490 				 */
   10491 				device_printf(dev, "unknown media (%x)\n",
   10492 				    active);
   10493 				break;
   10494 			}
   10495 			if (active & IFM_FDX)
   10496 				sc->sc_ctrl |= CTRL_FD;
   10497 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10498 		}
   10499 	} else if (sc->sc_type == WM_T_PCH) {
   10500 		wm_k1_gig_workaround_hv(sc,
   10501 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10502 	}
   10503 
   10504 	/*
   10505 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10506 	 * aggressive resulting in many collisions. To avoid this, increase
   10507 	 * the IPG and reduce Rx latency in the PHY.
   10508 	 */
   10509 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
   10510 	    && link) {
   10511 		uint32_t tipg_reg;
   10512 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10513 		bool fdx;
   10514 		uint16_t emi_addr, emi_val;
   10515 
   10516 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10517 		tipg_reg &= ~TIPG_IPGT_MASK;
   10518 		fdx = status & STATUS_FD;
   10519 
   10520 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10521 			tipg_reg |= 0xff;
   10522 			/* Reduce Rx latency in analog PHY */
   10523 			emi_val = 0;
   10524 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10525 		    fdx && speed != STATUS_SPEED_1000) {
   10526 			tipg_reg |= 0xc;
   10527 			emi_val = 1;
   10528 		} else {
   10529 			/* Roll back the default values */
   10530 			tipg_reg |= 0x08;
   10531 			emi_val = 1;
   10532 		}
   10533 
   10534 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10535 
   10536 		rv = sc->phy.acquire(sc);
   10537 		if (rv)
   10538 			return;
   10539 
   10540 		if (sc->sc_type == WM_T_PCH2)
   10541 			emi_addr = I82579_RX_CONFIG;
   10542 		else
   10543 			emi_addr = I217_RX_CONFIG;
   10544 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10545 
   10546 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10547 			uint16_t phy_reg;
   10548 
   10549 			sc->phy.readreg_locked(dev, 2,
   10550 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10551 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10552 			if (speed == STATUS_SPEED_100
   10553 			    || speed == STATUS_SPEED_10)
   10554 				phy_reg |= 0x3e8;
   10555 			else
   10556 				phy_reg |= 0xfa;
   10557 			sc->phy.writereg_locked(dev, 2,
   10558 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10559 
   10560 			if (speed == STATUS_SPEED_1000) {
   10561 				sc->phy.readreg_locked(dev, 2,
   10562 				    HV_PM_CTRL, &phy_reg);
   10563 
   10564 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10565 
   10566 				sc->phy.writereg_locked(dev, 2,
   10567 				    HV_PM_CTRL, phy_reg);
   10568 			}
   10569 		}
   10570 		sc->phy.release(sc);
   10571 
   10572 		if (rv)
   10573 			return;
   10574 
   10575 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10576 			uint16_t data, ptr_gap;
   10577 
   10578 			if (speed == STATUS_SPEED_1000) {
   10579 				rv = sc->phy.acquire(sc);
   10580 				if (rv)
   10581 					return;
   10582 
   10583 				rv = sc->phy.readreg_locked(dev, 2,
   10584 				    I82579_UNKNOWN1, &data);
   10585 				if (rv) {
   10586 					sc->phy.release(sc);
   10587 					return;
   10588 				}
   10589 
   10590 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10591 				if (ptr_gap < 0x18) {
   10592 					data &= ~(0x3ff << 2);
   10593 					data |= (0x18 << 2);
   10594 					rv = sc->phy.writereg_locked(dev,
   10595 					    2, I82579_UNKNOWN1, data);
   10596 				}
   10597 				sc->phy.release(sc);
   10598 				if (rv)
   10599 					return;
   10600 			} else {
   10601 				rv = sc->phy.acquire(sc);
   10602 				if (rv)
   10603 					return;
   10604 
   10605 				rv = sc->phy.writereg_locked(dev, 2,
   10606 				    I82579_UNKNOWN1, 0xc023);
   10607 				sc->phy.release(sc);
   10608 				if (rv)
   10609 					return;
   10610 
   10611 			}
   10612 		}
   10613 	}
   10614 
   10615 	/*
   10616 	 * I217 Packet Loss issue:
   10617 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10618 	 * on power up.
   10619 	 * Set the Beacon Duration for I217 to 8 usec
   10620 	 */
   10621 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10622 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10623 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10624 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10625 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10626 	}
   10627 
   10628 	/* Work-around I218 hang issue */
   10629 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10630 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10631 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10632 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10633 		wm_k1_workaround_lpt_lp(sc, link);
   10634 
   10635 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10636 		/*
   10637 		 * Set platform power management values for Latency
   10638 		 * Tolerance Reporting (LTR)
   10639 		 */
   10640 		wm_platform_pm_pch_lpt(sc,
   10641 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10642 	}
   10643 
   10644 	/* Clear link partner's EEE ability */
   10645 	sc->eee_lp_ability = 0;
   10646 
   10647 	/* FEXTNVM6 K1-off workaround */
   10648 	if (sc->sc_type == WM_T_PCH_SPT) {
   10649 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10650 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10651 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10652 		else
   10653 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10654 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10655 	}
   10656 
   10657 	if (!link)
   10658 		return;
   10659 
   10660 	switch (sc->sc_type) {
   10661 	case WM_T_PCH2:
   10662 		wm_k1_workaround_lv(sc);
   10663 		/* FALLTHROUGH */
   10664 	case WM_T_PCH:
   10665 		if (sc->sc_phytype == WMPHY_82578)
   10666 			wm_link_stall_workaround_hv(sc);
   10667 		break;
   10668 	default:
   10669 		break;
   10670 	}
   10671 
   10672 	/* Enable/Disable EEE after link up */
   10673 	if (sc->sc_phytype > WMPHY_82579)
   10674 		wm_set_eee_pchlan(sc);
   10675 }
   10676 
   10677 /*
   10678  * wm_linkintr_tbi:
   10679  *
   10680  *	Helper; handle link interrupts for TBI mode.
   10681  */
   10682 static void
   10683 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10684 {
   10685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10686 	uint32_t status;
   10687 
   10688 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10689 		__func__));
   10690 
   10691 	status = CSR_READ(sc, WMREG_STATUS);
   10692 	if (icr & ICR_LSC) {
   10693 		wm_check_for_link(sc);
   10694 		if (status & STATUS_LU) {
   10695 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10696 				device_xname(sc->sc_dev),
   10697 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10698 			/*
   10699 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10700 			 * so we should update sc->sc_ctrl
   10701 			 */
   10702 
   10703 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10704 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10705 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10706 			if (status & STATUS_FD)
   10707 				sc->sc_tctl |=
   10708 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10709 			else
   10710 				sc->sc_tctl |=
   10711 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10712 			if (sc->sc_ctrl & CTRL_TFCE)
   10713 				sc->sc_fcrtl |= FCRTL_XONE;
   10714 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10715 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10716 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10717 			sc->sc_tbi_linkup = 1;
   10718 			if_link_state_change(ifp, LINK_STATE_UP);
   10719 		} else {
   10720 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10721 				device_xname(sc->sc_dev)));
   10722 			sc->sc_tbi_linkup = 0;
   10723 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10724 		}
   10725 		/* Update LED */
   10726 		wm_tbi_serdes_set_linkled(sc);
   10727 	} else if (icr & ICR_RXSEQ)
   10728 		DPRINTF(sc, WM_DEBUG_LINK,
   10729 		    ("%s: LINK: Receive sequence error\n",
   10730 			device_xname(sc->sc_dev)));
   10731 }
   10732 
   10733 /*
   10734  * wm_linkintr_serdes:
   10735  *
 *	Helper; handle link interrupts for SERDES mode.
   10737  */
   10738 static void
   10739 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10740 {
   10741 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10742 	struct mii_data *mii = &sc->sc_mii;
   10743 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10744 	uint32_t pcs_adv, pcs_lpab, reg;
   10745 
   10746 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10747 		__func__));
   10748 
   10749 	if (icr & ICR_LSC) {
   10750 		/* Check PCS */
   10751 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10752 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10753 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10754 				device_xname(sc->sc_dev)));
   10755 			mii->mii_media_status |= IFM_ACTIVE;
   10756 			sc->sc_tbi_linkup = 1;
   10757 			if_link_state_change(ifp, LINK_STATE_UP);
   10758 		} else {
   10759 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10760 				device_xname(sc->sc_dev)));
   10761 			mii->mii_media_status |= IFM_NONE;
   10762 			sc->sc_tbi_linkup = 0;
   10763 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10764 			wm_tbi_serdes_set_linkled(sc);
   10765 			return;
   10766 		}
   10767 		mii->mii_media_active |= IFM_1000_SX;
   10768 		if ((reg & PCS_LSTS_FDX) != 0)
   10769 			mii->mii_media_active |= IFM_FDX;
   10770 		else
   10771 			mii->mii_media_active |= IFM_HDX;
   10772 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10773 			/* Check flow */
   10774 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10775 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10776 				DPRINTF(sc, WM_DEBUG_LINK,
   10777 				    ("XXX LINKOK but not ACOMP\n"));
   10778 				return;
   10779 			}
   10780 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10781 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10782 			DPRINTF(sc, WM_DEBUG_LINK,
   10783 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10784 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10785 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10786 				mii->mii_media_active |= IFM_FLOW
   10787 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10788 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10789 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10790 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10791 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10792 				mii->mii_media_active |= IFM_FLOW
   10793 				    | IFM_ETH_TXPAUSE;
   10794 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10795 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10796 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10797 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10798 				mii->mii_media_active |= IFM_FLOW
   10799 				    | IFM_ETH_RXPAUSE;
   10800 		}
   10801 		/* Update LED */
   10802 		wm_tbi_serdes_set_linkled(sc);
   10803 	} else
   10804 		DPRINTF(sc, WM_DEBUG_LINK,
   10805 		    ("%s: LINK: Receive sequence error\n",
   10806 		    device_xname(sc->sc_dev)));
   10807 }
   10808 
   10809 /*
   10810  * wm_linkintr:
   10811  *
   10812  *	Helper; handle link interrupts.
   10813  */
   10814 static void
   10815 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10816 {
   10817 
   10818 	KASSERT(mutex_owned(sc->sc_core_lock));
   10819 
   10820 	if (sc->sc_flags & WM_F_HAS_MII)
   10821 		wm_linkintr_gmii(sc, icr);
   10822 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10823 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10824 		wm_linkintr_serdes(sc, icr);
   10825 	else
   10826 		wm_linkintr_tbi(sc, icr);
   10827 }
   10828 
   10829 
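/*
 * wm_sched_handle_queue:
 *
 *	Kick deferred Tx/Rx processing for a queue, either on the
 *	per-device workqueue (with at most one enqueue pending per
 *	queue) or via softint.
 */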
   10830 static inline void
   10831 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10832 {
   10833 
   10834 	if (wmq->wmq_txrx_use_workqueue) {
   10835 		if (!wmq->wmq_wq_enqueued) {
   10836 			wmq->wmq_wq_enqueued = true;
   10837 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10838 			    curcpu());
   10839 		}
   10840 	} else
   10841 		softint_schedule(wmq->wmq_si);
   10842 }
   10843 
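/*
 * wm_legacy_intr_disable:
 *
 *	Mask all interrupts via the Interrupt Mask Clear register
 *	(INTx/MSI case).
 */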
   10844 static inline void
   10845 wm_legacy_intr_disable(struct wm_softc *sc)
   10846 {
   10847 
   10848 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10849 }
   10850 
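/*
 * wm_legacy_intr_enable:
 *
 *	Unmask the interrupts selected in sc_icr via the Interrupt Mask
 *	Set register (INTx/MSI case).
 */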
   10851 static inline void
   10852 wm_legacy_intr_enable(struct wm_softc *sc)
   10853 {
   10854 
   10855 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10856 }
   10857 
   10858 /*
   10859  * wm_intr_legacy:
   10860  *
   10861  *	Interrupt service routine for INTx and MSI.
   10862  */
   10863 static int
   10864 wm_intr_legacy(void *arg)
   10865 {
   10866 	struct wm_softc *sc = arg;
   10867 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10868 	struct wm_queue *wmq = &sc->sc_queue[0];
   10869 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10870 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10871 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10872 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10873 	uint32_t icr, rndval = 0;
   10874 	bool more = false;
   10875 
   10876 	icr = CSR_READ(sc, WMREG_ICR);
   10877 	if ((icr & sc->sc_icr) == 0)
   10878 		return 0;
   10879 
   10880 	DPRINTF(sc, WM_DEBUG_TX,
	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
	rndval = icr;
   10884 
   10885 	mutex_enter(txq->txq_lock);
   10886 
   10887 	if (txq->txq_stopping) {
   10888 		mutex_exit(txq->txq_lock);
   10889 		return 1;
   10890 	}
   10891 
   10892 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10893 	if (icr & ICR_TXDW) {
   10894 		DPRINTF(sc, WM_DEBUG_TX,
   10895 		    ("%s: TX: got TXDW interrupt\n",
   10896 			device_xname(sc->sc_dev)));
   10897 		WM_Q_EVCNT_INCR(txq, txdw);
   10898 	}
   10899 #endif
   10900 	if (txlimit > 0) {
   10901 		more |= wm_txeof(txq, txlimit);
   10902 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10903 			more = true;
   10904 	} else
   10905 		more = true;
   10906 	mutex_exit(txq->txq_lock);
   10907 
   10908 	mutex_enter(rxq->rxq_lock);
   10909 
   10910 	if (rxq->rxq_stopping) {
   10911 		mutex_exit(rxq->rxq_lock);
   10912 		return 1;
   10913 	}
   10914 
   10915 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10916 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10917 		DPRINTF(sc, WM_DEBUG_RX,
   10918 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10919 			device_xname(sc->sc_dev),
   10920 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10921 		WM_Q_EVCNT_INCR(rxq, intr);
   10922 	}
   10923 #endif
   10924 	if (rxlimit > 0) {
   10925 		/*
   10926 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule(),
		 * so it is safe to call wm_rxeof() in interrupt context.
   10929 		 */
   10930 		more = wm_rxeof(rxq, rxlimit);
   10931 	} else
   10932 		more = true;
   10933 
   10934 	mutex_exit(rxq->rxq_lock);
   10935 
   10936 	mutex_enter(sc->sc_core_lock);
   10937 
   10938 	if (sc->sc_core_stopping) {
   10939 		mutex_exit(sc->sc_core_lock);
   10940 		return 1;
   10941 	}
   10942 
   10943 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10944 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10945 		wm_linkintr(sc, icr);
   10946 	}
   10947 	if ((icr & ICR_GPI(0)) != 0)
   10948 		device_printf(sc->sc_dev, "got module interrupt\n");
   10949 
   10950 	mutex_exit(sc->sc_core_lock);
   10951 
   10952 	if (icr & ICR_RXO) {
   10953 #if defined(WM_DEBUG)
   10954 		log(LOG_WARNING, "%s: Receive overrun\n",
   10955 		    device_xname(sc->sc_dev));
   10956 #endif /* defined(WM_DEBUG) */
   10957 	}
   10958 
   10959 	rnd_add_uint32(&sc->rnd_source, rndval);
   10960 
   10961 	if (more) {
   10962 		/* Try to get more packets going. */
   10963 		wm_legacy_intr_disable(sc);
   10964 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10965 		wm_sched_handle_queue(sc, wmq);
   10966 	}
   10967 
   10968 	return 1;
   10969 }
   10970 
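/*
 * wm_txrxintr_disable:
 *
 *	Disable the Tx/Rx interrupts of the given queue. Without MSI-X,
 *	all interrupts are masked; with MSI-X, only the bits of this
 *	queue's vector are.
 */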
   10971 static inline void
   10972 wm_txrxintr_disable(struct wm_queue *wmq)
   10973 {
   10974 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10975 
   10976 	if (__predict_false(!wm_is_using_msix(sc))) {
   10977 		wm_legacy_intr_disable(sc);
   10978 		return;
   10979 	}
   10980 
   10981 	if (sc->sc_type == WM_T_82574)
   10982 		CSR_WRITE(sc, WMREG_IMC,
   10983 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10984 	else if (sc->sc_type == WM_T_82575)
   10985 		CSR_WRITE(sc, WMREG_EIMC,
   10986 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10987 	else
   10988 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10989 }
   10990 
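/*
 * wm_txrxintr_enable:
 *
 *	Re-enable the Tx/Rx interrupts of the given queue after
 *	recalculating the interrupt throttling rate.
 */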
   10991 static inline void
   10992 wm_txrxintr_enable(struct wm_queue *wmq)
   10993 {
   10994 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10995 
   10996 	wm_itrs_calculate(sc, wmq);
   10997 
   10998 	if (__predict_false(!wm_is_using_msix(sc))) {
   10999 		wm_legacy_intr_enable(sc);
   11000 		return;
   11001 	}
   11002 
   11003 	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
   11008 	 */
   11009 	if (sc->sc_type == WM_T_82574)
   11010 		CSR_WRITE(sc, WMREG_IMS,
   11011 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   11012 	else if (sc->sc_type == WM_T_82575)
   11013 		CSR_WRITE(sc, WMREG_EIMS,
   11014 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   11015 	else
   11016 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   11017 }
   11018 
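/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
 */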
   11019 static int
   11020 wm_txrxintr_msix(void *arg)
   11021 {
   11022 	struct wm_queue *wmq = arg;
   11023 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11024 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11025 	struct wm_softc *sc = txq->txq_sc;
   11026 	u_int txlimit = sc->sc_tx_intr_process_limit;
   11027 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   11028 	bool txmore;
   11029 	bool rxmore;
   11030 
   11031 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   11032 
   11033 	DPRINTF(sc, WM_DEBUG_TX,
   11034 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   11035 
   11036 	wm_txrxintr_disable(wmq);
   11037 
   11038 	mutex_enter(txq->txq_lock);
   11039 
   11040 	if (txq->txq_stopping) {
   11041 		mutex_exit(txq->txq_lock);
   11042 		return 1;
   11043 	}
   11044 
   11045 	WM_Q_EVCNT_INCR(txq, txdw);
   11046 	if (txlimit > 0) {
   11047 		txmore = wm_txeof(txq, txlimit);
		/* The deferred start is done in wm_handle_queue(). */
   11049 	} else
   11050 		txmore = true;
   11051 	mutex_exit(txq->txq_lock);
   11052 
   11053 	DPRINTF(sc, WM_DEBUG_RX,
   11054 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   11055 	mutex_enter(rxq->rxq_lock);
   11056 
   11057 	if (rxq->rxq_stopping) {
   11058 		mutex_exit(rxq->rxq_lock);
   11059 		return 1;
   11060 	}
   11061 
   11062 	WM_Q_EVCNT_INCR(rxq, intr);
   11063 	if (rxlimit > 0) {
   11064 		rxmore = wm_rxeof(rxq, rxlimit);
   11065 	} else
   11066 		rxmore = true;
   11067 	mutex_exit(rxq->rxq_lock);
   11068 
   11069 	wm_itrs_writereg(sc, wmq);
   11070 
   11071 	if (txmore || rxmore) {
   11072 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11073 		wm_sched_handle_queue(sc, wmq);
   11074 	} else
   11075 		wm_txrxintr_enable(wmq);
   11076 
   11077 	return 1;
   11078 }
   11079 
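/*
 * wm_handle_queue:
 *
 *	Deferred per-queue Tx/Rx processing, run in softint or workqueue
 *	context. Reschedules itself while more work remains; otherwise
 *	re-enables the queue's interrupts.
 */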
   11080 static void
   11081 wm_handle_queue(void *arg)
   11082 {
   11083 	struct wm_queue *wmq = arg;
   11084 	struct wm_txqueue *txq = &wmq->wmq_txq;
   11085 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   11086 	struct wm_softc *sc = txq->txq_sc;
   11087 	u_int txlimit = sc->sc_tx_process_limit;
   11088 	u_int rxlimit = sc->sc_rx_process_limit;
   11089 	bool txmore;
   11090 	bool rxmore;
   11091 
   11092 	mutex_enter(txq->txq_lock);
   11093 	if (txq->txq_stopping) {
   11094 		mutex_exit(txq->txq_lock);
   11095 		return;
   11096 	}
   11097 	txmore = wm_txeof(txq, txlimit);
   11098 	wm_deferred_start_locked(txq);
   11099 	mutex_exit(txq->txq_lock);
   11100 
   11101 	mutex_enter(rxq->rxq_lock);
   11102 	if (rxq->rxq_stopping) {
   11103 		mutex_exit(rxq->rxq_lock);
   11104 		return;
   11105 	}
   11106 	WM_Q_EVCNT_INCR(rxq, defer);
   11107 	rxmore = wm_rxeof(rxq, rxlimit);
   11108 	mutex_exit(rxq->rxq_lock);
   11109 
   11110 	if (txmore || rxmore) {
   11111 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   11112 		wm_sched_handle_queue(sc, wmq);
   11113 	} else
   11114 		wm_txrxintr_enable(wmq);
   11115 }
   11116 
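/*
 * wm_handle_queue_work:
 *
 *	Workqueue entry point; a wrapper around wm_handle_queue().
 */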
   11117 static void
   11118 wm_handle_queue_work(struct work *wk, void *context)
   11119 {
   11120 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   11121 
   11122 	/*
	 * Workaround for some qemu environments, which do not stop the
	 * interrupt immediately.
   11125 	 */
   11126 	wmq->wmq_wq_enqueued = false;
   11127 	wm_handle_queue(wmq);
   11128 }
   11129 
   11130 /*
   11131  * wm_linkintr_msix:
   11132  *
   11133  *	Interrupt service routine for link status change for MSI-X.
   11134  */
   11135 static int
   11136 wm_linkintr_msix(void *arg)
   11137 {
   11138 	struct wm_softc *sc = arg;
   11139 	uint32_t reg;
	bool has_rxo = false;
   11141 
   11142 	reg = CSR_READ(sc, WMREG_ICR);
   11143 	mutex_enter(sc->sc_core_lock);
   11144 	DPRINTF(sc, WM_DEBUG_LINK,
   11145 	    ("%s: LINK: got link intr. ICR = %08x\n",
   11146 		device_xname(sc->sc_dev), reg));
   11147 
   11148 	if (sc->sc_core_stopping)
   11149 		goto out;
   11150 
   11151 	if ((reg & ICR_LSC) != 0) {
   11152 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   11153 		wm_linkintr(sc, ICR_LSC);
   11154 	}
   11155 	if ((reg & ICR_GPI(0)) != 0)
   11156 		device_printf(sc->sc_dev, "got module interrupt\n");
   11157 
   11158 	/*
   11159 	 * XXX 82574 MSI-X mode workaround
   11160 	 *
	 * 82574 MSI-X mode delivers a receive overrun (RXO) interrupt on the
	 * ICR_OTHER MSI-X vector; furthermore, it raises neither the
	 * ICR_RXQ(0) nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts by writing WMREG_ICS to process the
	 * received packets.
   11165 	 */
   11166 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   11167 #if defined(WM_DEBUG)
   11168 		log(LOG_WARNING, "%s: Receive overrun\n",
   11169 		    device_xname(sc->sc_dev));
   11170 #endif /* defined(WM_DEBUG) */
   11171 
   11172 		has_rxo = true;
   11173 		/*
		 * The RXO interrupt rate is very high when receive traffic
		 * is heavy, so we use polling mode for ICR_OTHER, as for the
		 * Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end
		 * of wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   11179 		 */
   11180 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   11181 
   11182 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   11183 	}
   11184 
   11187 out:
   11188 	mutex_exit(sc->sc_core_lock);
   11189 
   11190 	if (sc->sc_type == WM_T_82574) {
   11191 		if (!has_rxo)
   11192 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   11193 		else
   11194 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   11195 	} else if (sc->sc_type == WM_T_82575)
   11196 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   11197 	else
   11198 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   11199 
   11200 	return 1;
   11201 }
   11202 
   11203 /*
   11204  * Media related.
   11205  * GMII, SGMII, TBI (and SERDES)
   11206  */
   11207 
   11208 /* Common */
   11209 
   11210 /*
   11211  * wm_tbi_serdes_set_linkled:
   11212  *
   11213  *	Update the link LED on TBI and SERDES devices.
   11214  */
   11215 static void
   11216 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   11217 {
   11218 
   11219 	if (sc->sc_tbi_linkup)
   11220 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   11221 	else
   11222 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   11223 
   11224 	/* 82540 or newer devices are active low */
   11225 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   11226 
   11227 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11228 }
   11229 
   11230 /* GMII related */
   11231 
   11232 /*
   11233  * wm_gmii_reset:
   11234  *
   11235  *	Reset the PHY.
   11236  */
   11237 static void
   11238 wm_gmii_reset(struct wm_softc *sc)
   11239 {
   11240 	uint32_t reg;
   11241 	int rv;
   11242 
   11243 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11244 		device_xname(sc->sc_dev), __func__));
   11245 
   11246 	rv = sc->phy.acquire(sc);
   11247 	if (rv != 0) {
   11248 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11249 		    __func__);
   11250 		return;
   11251 	}
   11252 
   11253 	switch (sc->sc_type) {
   11254 	case WM_T_82542_2_0:
   11255 	case WM_T_82542_2_1:
   11256 		/* null */
   11257 		break;
   11258 	case WM_T_82543:
   11259 		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's configuration.
   11262 		 * In addition, we need to perform a hardware reset on the PHY
   11263 		 * to take it out of reset.
   11264 		 */
   11265 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11266 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11267 
   11268 		/* The PHY reset pin is active-low. */
   11269 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11270 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   11271 		    CTRL_EXT_SWDPIN(4));
   11272 		reg |= CTRL_EXT_SWDPIO(4);
   11273 
   11274 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11275 		CSR_WRITE_FLUSH(sc);
   11276 		delay(10*1000);
   11277 
   11278 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   11279 		CSR_WRITE_FLUSH(sc);
   11280 		delay(150);
   11281 #if 0
   11282 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   11283 #endif
   11284 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   11285 		break;
   11286 	case WM_T_82544:	/* Reset 10000us */
   11287 	case WM_T_82540:
   11288 	case WM_T_82545:
   11289 	case WM_T_82545_3:
   11290 	case WM_T_82546:
   11291 	case WM_T_82546_3:
   11292 	case WM_T_82541:
   11293 	case WM_T_82541_2:
   11294 	case WM_T_82547:
   11295 	case WM_T_82547_2:
   11296 	case WM_T_82571:	/* Reset 100us */
   11297 	case WM_T_82572:
   11298 	case WM_T_82573:
   11299 	case WM_T_82574:
   11300 	case WM_T_82575:
   11301 	case WM_T_82576:
   11302 	case WM_T_82580:
   11303 	case WM_T_I350:
   11304 	case WM_T_I354:
   11305 	case WM_T_I210:
   11306 	case WM_T_I211:
   11307 	case WM_T_82583:
   11308 	case WM_T_80003:
   11309 		/* Generic reset */
   11310 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11311 		CSR_WRITE_FLUSH(sc);
   11312 		delay(20000);
   11313 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11314 		CSR_WRITE_FLUSH(sc);
   11315 		delay(20000);
   11316 
   11317 		if ((sc->sc_type == WM_T_82541)
   11318 		    || (sc->sc_type == WM_T_82541_2)
   11319 		    || (sc->sc_type == WM_T_82547)
   11320 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   11322 			/* XXX add code to set LED after phy reset */
   11323 		}
   11324 		break;
   11325 	case WM_T_ICH8:
   11326 	case WM_T_ICH9:
   11327 	case WM_T_ICH10:
   11328 	case WM_T_PCH:
   11329 	case WM_T_PCH2:
   11330 	case WM_T_PCH_LPT:
   11331 	case WM_T_PCH_SPT:
   11332 	case WM_T_PCH_CNP:
   11333 	case WM_T_PCH_TGP:
   11334 		/* Generic reset */
   11335 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11336 		CSR_WRITE_FLUSH(sc);
   11337 		delay(100);
   11338 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11339 		CSR_WRITE_FLUSH(sc);
   11340 		delay(150);
   11341 		break;
   11342 	default:
   11343 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11344 		    __func__);
   11345 		break;
   11346 	}
   11347 
   11348 	sc->phy.release(sc);
   11349 
   11350 	/* get_cfg_done */
   11351 	wm_get_cfg_done(sc);
   11352 
   11353 	/* Extra setup */
   11354 	switch (sc->sc_type) {
   11355 	case WM_T_82542_2_0:
   11356 	case WM_T_82542_2_1:
   11357 	case WM_T_82543:
   11358 	case WM_T_82544:
   11359 	case WM_T_82540:
   11360 	case WM_T_82545:
   11361 	case WM_T_82545_3:
   11362 	case WM_T_82546:
   11363 	case WM_T_82546_3:
   11364 	case WM_T_82541_2:
   11365 	case WM_T_82547_2:
   11366 	case WM_T_82571:
   11367 	case WM_T_82572:
   11368 	case WM_T_82573:
   11369 	case WM_T_82574:
   11370 	case WM_T_82583:
   11371 	case WM_T_82575:
   11372 	case WM_T_82576:
   11373 	case WM_T_82580:
   11374 	case WM_T_I350:
   11375 	case WM_T_I354:
   11376 	case WM_T_I210:
   11377 	case WM_T_I211:
   11378 	case WM_T_80003:
   11379 		/* Null */
   11380 		break;
   11381 	case WM_T_82541:
   11382 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
   11384 		break;
   11385 	case WM_T_ICH8:
   11386 	case WM_T_ICH9:
   11387 	case WM_T_ICH10:
   11388 	case WM_T_PCH:
   11389 	case WM_T_PCH2:
   11390 	case WM_T_PCH_LPT:
   11391 	case WM_T_PCH_SPT:
   11392 	case WM_T_PCH_CNP:
   11393 	case WM_T_PCH_TGP:
   11394 		wm_phy_post_reset(sc);
   11395 		break;
   11396 	default:
   11397 		panic("%s: unknown type\n", __func__);
   11398 		break;
   11399 	}
   11400 }
   11401 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since
 * the PHY registers cannot be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still not be perfect because of missing table
 * entries, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   11420 static void
   11421 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11422     uint16_t phy_model)
   11423 {
   11424 	device_t dev = sc->sc_dev;
   11425 	struct mii_data *mii = &sc->sc_mii;
   11426 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11427 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11428 	mii_readreg_t new_readreg;
   11429 	mii_writereg_t new_writereg;
   11430 	bool dodiag = true;
   11431 
   11432 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11433 		device_xname(sc->sc_dev), __func__));
   11434 
   11435 	/*
	 * 1000BASE-T SFP uses SGMII, so the first assumed PHY type is always
	 * incorrect. Don't print diagnostic output on the second call.
   11438 	 */
   11439 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11440 		dodiag = false;
   11441 
   11442 	if (mii->mii_readreg == NULL) {
   11443 		/*
   11444 		 *  This is the first call of this function. For ICH and PCH
   11445 		 * variants, it's difficult to determine the PHY access method
   11446 		 * by sc_type, so use the PCI product ID for some devices.
   11447 		 */
   11448 
   11449 		switch (sc->sc_pcidevid) {
   11450 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11451 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11452 			/* 82577 */
   11453 			new_phytype = WMPHY_82577;
   11454 			break;
   11455 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11456 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11457 			/* 82578 */
   11458 			new_phytype = WMPHY_82578;
   11459 			break;
   11460 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11461 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11462 			/* 82579 */
   11463 			new_phytype = WMPHY_82579;
   11464 			break;
   11465 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11466 		case PCI_PRODUCT_INTEL_82801I_BM:
   11467 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11468 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11469 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11470 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11471 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11472 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11473 			/* ICH8, 9, 10 with 82567 */
   11474 			new_phytype = WMPHY_BM;
   11475 			break;
   11476 		default:
   11477 			break;
   11478 		}
   11479 	} else {
   11480 		/* It's not the first call. Use PHY OUI and model */
   11481 		switch (phy_oui) {
   11482 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11483 			switch (phy_model) {
   11484 			case MII_MODEL_ATTANSIC_AR8021:
   11485 				new_phytype = WMPHY_82578;
   11486 				break;
   11487 			default:
   11488 				break;
   11489 			}
   11490 			break;
   11491 		case MII_OUI_xxMARVELL:
   11492 			switch (phy_model) {
   11493 			case MII_MODEL_xxMARVELL_I210:
   11494 				new_phytype = WMPHY_I210;
   11495 				break;
   11496 			case MII_MODEL_xxMARVELL_E1011:
   11497 			case MII_MODEL_xxMARVELL_E1000_3:
   11498 			case MII_MODEL_xxMARVELL_E1000_5:
   11499 			case MII_MODEL_xxMARVELL_E1112:
   11500 				new_phytype = WMPHY_M88;
   11501 				break;
   11502 			case MII_MODEL_xxMARVELL_E1149:
   11503 				new_phytype = WMPHY_BM;
   11504 				break;
   11505 			case MII_MODEL_xxMARVELL_E1111:
   11506 			case MII_MODEL_xxMARVELL_I347:
   11507 			case MII_MODEL_xxMARVELL_E1512:
   11508 			case MII_MODEL_xxMARVELL_E1340M:
   11509 			case MII_MODEL_xxMARVELL_E1543:
   11510 				new_phytype = WMPHY_M88;
   11511 				break;
   11512 			case MII_MODEL_xxMARVELL_I82563:
   11513 				new_phytype = WMPHY_GG82563;
   11514 				break;
   11515 			default:
   11516 				break;
   11517 			}
   11518 			break;
   11519 		case MII_OUI_INTEL:
   11520 			switch (phy_model) {
   11521 			case MII_MODEL_INTEL_I82577:
   11522 				new_phytype = WMPHY_82577;
   11523 				break;
   11524 			case MII_MODEL_INTEL_I82579:
   11525 				new_phytype = WMPHY_82579;
   11526 				break;
   11527 			case MII_MODEL_INTEL_I217:
   11528 				new_phytype = WMPHY_I217;
   11529 				break;
   11530 			case MII_MODEL_INTEL_I82580:
   11531 				new_phytype = WMPHY_82580;
   11532 				break;
   11533 			case MII_MODEL_INTEL_I350:
   11534 				new_phytype = WMPHY_I350;
   11535 				break;
   11536 			default:
   11537 				break;
   11538 			}
   11539 			break;
   11540 		case MII_OUI_yyINTEL:
   11541 			switch (phy_model) {
   11542 			case MII_MODEL_yyINTEL_I82562G:
   11543 			case MII_MODEL_yyINTEL_I82562EM:
   11544 			case MII_MODEL_yyINTEL_I82562ET:
   11545 				new_phytype = WMPHY_IFE;
   11546 				break;
   11547 			case MII_MODEL_yyINTEL_IGP01E1000:
   11548 				new_phytype = WMPHY_IGP;
   11549 				break;
   11550 			case MII_MODEL_yyINTEL_I82566:
   11551 				new_phytype = WMPHY_IGP_3;
   11552 				break;
   11553 			default:
   11554 				break;
   11555 			}
   11556 			break;
   11557 		default:
   11558 			break;
   11559 		}
   11560 
   11561 		if (dodiag) {
   11562 			if (new_phytype == WMPHY_UNKNOWN)
   11563 				aprint_verbose_dev(dev,
   11564 				    "%s: Unknown PHY model. OUI=%06x, "
   11565 				    "model=%04x\n", __func__, phy_oui,
   11566 				    phy_model);
   11567 
   11568 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11569 			    && (sc->sc_phytype != new_phytype)) {
   11570 				aprint_error_dev(dev, "Previously assumed PHY "
				    "type(%u) was incorrect. PHY type from PHY "
   11572 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11573 			}
   11574 		}
   11575 	}
   11576 
   11577 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11578 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11579 		/* SGMII */
   11580 		new_readreg = wm_sgmii_readreg;
   11581 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11583 		/* BM2 (phyaddr == 1) */
   11584 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11585 		    && (new_phytype != WMPHY_BM)
   11586 		    && (new_phytype != WMPHY_UNKNOWN))
   11587 			doubt_phytype = new_phytype;
   11588 		new_phytype = WMPHY_BM;
   11589 		new_readreg = wm_gmii_bm_readreg;
   11590 		new_writereg = wm_gmii_bm_writereg;
   11591 	} else if (sc->sc_type >= WM_T_PCH) {
   11592 		/* All PCH* use _hv_ */
   11593 		new_readreg = wm_gmii_hv_readreg;
   11594 		new_writereg = wm_gmii_hv_writereg;
   11595 	} else if (sc->sc_type >= WM_T_ICH8) {
   11596 		/* non-82567 ICH8, 9 and 10 */
   11597 		new_readreg = wm_gmii_i82544_readreg;
   11598 		new_writereg = wm_gmii_i82544_writereg;
   11599 	} else if (sc->sc_type >= WM_T_80003) {
   11600 		/* 80003 */
   11601 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11602 		    && (new_phytype != WMPHY_GG82563)
   11603 		    && (new_phytype != WMPHY_UNKNOWN))
   11604 			doubt_phytype = new_phytype;
   11605 		new_phytype = WMPHY_GG82563;
   11606 		new_readreg = wm_gmii_i80003_readreg;
   11607 		new_writereg = wm_gmii_i80003_writereg;
   11608 	} else if (sc->sc_type >= WM_T_I210) {
   11609 		/* I210 and I211 */
   11610 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11611 		    && (new_phytype != WMPHY_I210)
   11612 		    && (new_phytype != WMPHY_UNKNOWN))
   11613 			doubt_phytype = new_phytype;
   11614 		new_phytype = WMPHY_I210;
   11615 		new_readreg = wm_gmii_gs40g_readreg;
   11616 		new_writereg = wm_gmii_gs40g_writereg;
   11617 	} else if (sc->sc_type >= WM_T_82580) {
   11618 		/* 82580, I350 and I354 */
   11619 		new_readreg = wm_gmii_82580_readreg;
   11620 		new_writereg = wm_gmii_82580_writereg;
   11621 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11623 		new_readreg = wm_gmii_i82544_readreg;
   11624 		new_writereg = wm_gmii_i82544_writereg;
   11625 	} else {
   11626 		new_readreg = wm_gmii_i82543_readreg;
   11627 		new_writereg = wm_gmii_i82543_writereg;
   11628 	}
   11629 
   11630 	if (new_phytype == WMPHY_BM) {
   11631 		/* All BM use _bm_ */
   11632 		new_readreg = wm_gmii_bm_readreg;
   11633 		new_writereg = wm_gmii_bm_writereg;
   11634 	}
   11635 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
   11636 		/* All PCH* use _hv_ */
   11637 		new_readreg = wm_gmii_hv_readreg;
   11638 		new_writereg = wm_gmii_hv_writereg;
   11639 	}
   11640 
   11641 	/* Diag output */
   11642 	if (dodiag) {
   11643 		if (doubt_phytype != WMPHY_UNKNOWN)
   11644 			aprint_error_dev(dev, "Assumed new PHY type was "
   11645 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11646 			    new_phytype);
   11647 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11648 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   11650 			    "was incorrect. New PHY type = %u\n",
   11651 			    sc->sc_phytype, new_phytype);
   11652 
   11653 		if ((mii->mii_readreg != NULL) &&
   11654 		    (new_phytype == WMPHY_UNKNOWN))
   11655 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11656 
   11657 		if ((mii->mii_readreg != NULL) &&
   11658 		    (mii->mii_readreg != new_readreg))
   11659 			aprint_error_dev(dev, "Previously assumed PHY "
   11660 			    "read/write function was incorrect.\n");
   11661 	}
   11662 
   11663 	/* Update now */
   11664 	sc->sc_phytype = new_phytype;
   11665 	mii->mii_readreg = new_readreg;
   11666 	mii->mii_writereg = new_writereg;
   11667 	if (new_readreg == wm_gmii_hv_readreg) {
   11668 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11669 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11670 	} else if (new_readreg == wm_sgmii_readreg) {
   11671 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11672 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11673 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11674 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11675 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11676 	}
   11677 }
   11678 
   11679 /*
   11680  * wm_get_phy_id_82575:
   11681  *
 *	Return the PHY ID, or -1 on failure.
   11683  */
   11684 static int
   11685 wm_get_phy_id_82575(struct wm_softc *sc)
   11686 {
   11687 	uint32_t reg;
   11688 	int phyid = -1;
   11689 
   11690 	/* XXX */
   11691 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11692 		return -1;
   11693 
   11694 	if (wm_sgmii_uses_mdio(sc)) {
   11695 		switch (sc->sc_type) {
   11696 		case WM_T_82575:
   11697 		case WM_T_82576:
   11698 			reg = CSR_READ(sc, WMREG_MDIC);
   11699 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11700 			break;
   11701 		case WM_T_82580:
   11702 		case WM_T_I350:
   11703 		case WM_T_I354:
   11704 		case WM_T_I210:
   11705 		case WM_T_I211:
   11706 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11707 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11708 			break;
   11709 		default:
   11710 			return -1;
   11711 		}
   11712 	}
   11713 
   11714 	return phyid;
   11715 }
   11716 
   11717 /*
   11718  * wm_gmii_mediainit:
   11719  *
   11720  *	Initialize media for use on 1000BASE-T devices.
   11721  */
   11722 static void
   11723 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11724 {
   11725 	device_t dev = sc->sc_dev;
   11726 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11727 	struct mii_data *mii = &sc->sc_mii;
   11728 
   11729 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11730 		device_xname(sc->sc_dev), __func__));
   11731 
   11732 	/* We have GMII. */
   11733 	sc->sc_flags |= WM_F_HAS_MII;
   11734 
   11735 	if (sc->sc_type == WM_T_80003)
   11736 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11737 	else
   11738 		sc->sc_tipg = TIPG_1000T_DFLT;
   11739 
   11740 	/*
   11741 	 * Let the chip set speed/duplex on its own based on
   11742 	 * signals from the PHY.
   11743 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11744 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11745 	 */
   11746 	sc->sc_ctrl |= CTRL_SLU;
   11747 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11748 
   11749 	/* Initialize our media structures and probe the GMII. */
   11750 	mii->mii_ifp = ifp;
   11751 
   11752 	mii->mii_statchg = wm_gmii_statchg;
   11753 
   11754 	/* get PHY control from SMBus to PCIe */
   11755 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11756 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11757 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
   11758 		wm_init_phy_workarounds_pchlan(sc);
   11759 
   11760 	wm_gmii_reset(sc);
   11761 
   11762 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11763 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11764 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11765 
   11766 	/* Setup internal SGMII PHY for SFP */
   11767 	wm_sgmii_sfp_preconfig(sc);
   11768 
   11769 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11770 	    || (sc->sc_type == WM_T_82580)
   11771 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11772 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11773 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11774 			/* Attach only one port */
   11775 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11776 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11777 		} else {
   11778 			int i, id;
   11779 			uint32_t ctrl_ext;
   11780 
   11781 			id = wm_get_phy_id_82575(sc);
   11782 			if (id != -1) {
   11783 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11784 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11785 			}
   11786 			if ((id == -1)
   11787 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   11789 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11790 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11791 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11792 				CSR_WRITE_FLUSH(sc);
   11793 				delay(300*1000); /* XXX too long */
   11794 
   11795 				/*
				 * Scan PHY addresses 1 to 7.
				 *
				 * I2C access may fail with the I2C register's
				 * ERROR bit set, so suppress error messages
				 * while scanning.
   11801 				 */
   11802 				sc->phy.no_errprint = true;
   11803 				for (i = 1; i < 8; i++)
   11804 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11805 					    0xffffffff, i, MII_OFFSET_ANY,
   11806 					    MIIF_DOPAUSE);
   11807 				sc->phy.no_errprint = false;
   11808 
   11809 				/* Restore previous sfp cage power state */
   11810 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11811 			}
   11812 		}
   11813 	} else
   11814 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11815 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11816 
   11817 	/*
	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   11820 	 */
   11821 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   11822 		(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
   11823 		|| (sc->sc_type == WM_T_PCH_TGP))
   11824 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11825 		wm_set_mdio_slow_mode_hv(sc);
   11826 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11827 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11828 	}
   11829 
   11830 	/*
   11831 	 * (For ICH8 variants)
   11832 	 * If PHY detection failed, use BM's r/w function and retry.
   11833 	 */
   11834 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11835 		/* if failed, retry with *_bm_* */
   11836 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11837 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11838 		    sc->sc_phytype);
   11839 		sc->sc_phytype = WMPHY_BM;
   11840 		mii->mii_readreg = wm_gmii_bm_readreg;
   11841 		mii->mii_writereg = wm_gmii_bm_writereg;
   11842 
   11843 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11844 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11845 	}
   11846 
   11847 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11849 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11850 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11851 		sc->sc_phytype = WMPHY_NONE;
   11852 	} else {
   11853 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11854 
   11855 		/*
		 * PHY found! Check the PHY type again with a second call to
		 * wm_gmii_setup_phytype().
   11858 		 */
   11859 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11860 		    child->mii_mpd_model);
   11861 
   11862 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11863 	}
   11864 }
   11865 
   11866 /*
   11867  * wm_gmii_mediachange:	[ifmedia interface function]
   11868  *
   11869  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11870  */
   11871 static int
   11872 wm_gmii_mediachange(struct ifnet *ifp)
   11873 {
   11874 	struct wm_softc *sc = ifp->if_softc;
   11875 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11876 	uint32_t reg;
   11877 	int rc;
   11878 
   11879 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11880 		device_xname(sc->sc_dev), __func__));
   11881 
   11882 	KASSERT(mutex_owned(sc->sc_core_lock));
   11883 
   11884 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11885 		return 0;
   11886 
   11887 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11888 	if ((sc->sc_type == WM_T_82580)
   11889 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11890 	    || (sc->sc_type == WM_T_I211)) {
   11891 		reg = CSR_READ(sc, WMREG_PHPM);
   11892 		reg &= ~PHPM_GO_LINK_D;
   11893 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11894 	}
   11895 
   11896 	/* Disable D0 LPLU. */
   11897 	wm_lplu_d0_disable(sc);
   11898 
   11899 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11900 	sc->sc_ctrl |= CTRL_SLU;
   11901 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11902 	    || (sc->sc_type > WM_T_82543)) {
   11903 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11904 	} else {
   11905 		sc->sc_ctrl &= ~CTRL_ASDE;
   11906 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11907 		if (ife->ifm_media & IFM_FDX)
   11908 			sc->sc_ctrl |= CTRL_FD;
   11909 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11910 		case IFM_10_T:
   11911 			sc->sc_ctrl |= CTRL_SPEED_10;
   11912 			break;
   11913 		case IFM_100_TX:
   11914 			sc->sc_ctrl |= CTRL_SPEED_100;
   11915 			break;
   11916 		case IFM_1000_T:
   11917 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11918 			break;
   11919 		case IFM_NONE:
   11920 			/* There is no specific setting for IFM_NONE */
   11921 			break;
   11922 		default:
   11923 			panic("wm_gmii_mediachange: bad media 0x%x",
   11924 			    ife->ifm_media);
   11925 		}
   11926 	}
   11927 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11928 	CSR_WRITE_FLUSH(sc);
   11929 
   11930 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11931 		wm_serdes_mediachange(ifp);
   11932 
   11933 	if (sc->sc_type <= WM_T_82543)
   11934 		wm_gmii_reset(sc);
   11935 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11936 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11938 		delay(300 * 1000);
   11939 		wm_gmii_reset(sc);
   11940 	}
   11941 
   11942 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11943 		return 0;
   11944 	return rc;
   11945 }
   11946 
   11947 /*
   11948  * wm_gmii_mediastatus:	[ifmedia interface function]
   11949  *
   11950  *	Get the current interface media status on a 1000BASE-T device.
   11951  */
   11952 static void
   11953 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11954 {
   11955 	struct wm_softc *sc = ifp->if_softc;
   11956 	struct ethercom *ec = &sc->sc_ethercom;
   11957 	struct mii_data *mii;
   11958 	bool dopoll = true;
   11959 
   11960 	/*
	 * A normal driver would call ether_mediastatus() here. It is
	 * open-coded so that the mii_pollstat() call can be skipped.
   11963 	 */
   11964 	KASSERT(mutex_owned(sc->sc_core_lock));
   11965 	KASSERT(ec->ec_mii != NULL);
   11966 	KASSERT(mii_locked(ec->ec_mii));
   11967 
   11968 	mii = ec->ec_mii;
   11969 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
   11970 		struct timeval now;
   11971 
   11972 		getmicrotime(&now);
   11973 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
   11974 			dopoll = false;
   11975 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
   11976 			/* Simplify by checking tv_sec only. It's enough. */
   11977 
   11978 			sc->sc_linkup_delay_time.tv_sec = 0;
   11979 			sc->sc_linkup_delay_time.tv_usec = 0;
   11980 		}
   11981 	}
   11982 
   11983 	/*
   11984 	 * Don't call mii_pollstat() while doing workaround.
   11985 	 * See also wm_linkintr_gmii() and wm_tick().
   11986 	 */
   11987 	if (dopoll)
   11988 		mii_pollstat(mii);
   11989 	ifmr->ifm_active = mii->mii_media_active;
   11990 	ifmr->ifm_status = mii->mii_media_status;
   11991 
   11992 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11993 	    | sc->sc_flowflags;
   11994 }
   11995 
   11996 #define	MDI_IO		CTRL_SWDPIN(2)
   11997 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11998 #define	MDI_CLK		CTRL_SWDPIN(3)
   11999 
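/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang data out on the MDIO line (i82543 version), most
 *	significant bit first, toggling MDI_CLK for each bit.
 */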
   12000 static void
   12001 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   12002 {
   12003 	uint32_t i, v;
   12004 
   12005 	v = CSR_READ(sc, WMREG_CTRL);
   12006 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   12007 	v |= MDI_DIR | CTRL_SWDPIO(3);
   12008 
   12009 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   12010 		if (data & i)
   12011 			v |= MDI_IO;
   12012 		else
   12013 			v &= ~MDI_IO;
   12014 		CSR_WRITE(sc, WMREG_CTRL, v);
   12015 		CSR_WRITE_FLUSH(sc);
   12016 		delay(10);
   12017 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12018 		CSR_WRITE_FLUSH(sc);
   12019 		delay(10);
   12020 		CSR_WRITE(sc, WMREG_CTRL, v);
   12021 		CSR_WRITE_FLUSH(sc);
   12022 		delay(10);
   12023 	}
   12024 }
   12025 
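/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in from the MDIO line (i82543 version).
 */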
   12026 static uint16_t
   12027 wm_i82543_mii_recvbits(struct wm_softc *sc)
   12028 {
   12029 	uint32_t v, i;
   12030 	uint16_t data = 0;
   12031 
   12032 	v = CSR_READ(sc, WMREG_CTRL);
   12033 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   12034 	v |= CTRL_SWDPIO(3);
   12035 
   12036 	CSR_WRITE(sc, WMREG_CTRL, v);
   12037 	CSR_WRITE_FLUSH(sc);
   12038 	delay(10);
   12039 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12040 	CSR_WRITE_FLUSH(sc);
   12041 	delay(10);
   12042 	CSR_WRITE(sc, WMREG_CTRL, v);
   12043 	CSR_WRITE_FLUSH(sc);
   12044 	delay(10);
   12045 
   12046 	for (i = 0; i < 16; i++) {
   12047 		data <<= 1;
   12048 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12049 		CSR_WRITE_FLUSH(sc);
   12050 		delay(10);
   12051 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   12052 			data |= 1;
   12053 		CSR_WRITE(sc, WMREG_CTRL, v);
   12054 		CSR_WRITE_FLUSH(sc);
   12055 		delay(10);
   12056 	}
   12057 
   12058 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   12059 	CSR_WRITE_FLUSH(sc);
   12060 	delay(10);
   12061 	CSR_WRITE(sc, WMREG_CTRL, v);
   12062 	CSR_WRITE_FLUSH(sc);
   12063 	delay(10);
   12064 
   12065 	return data;
   12066 }
   12067 
   12068 #undef MDI_IO
   12069 #undef MDI_DIR
   12070 #undef MDI_CLK
   12071 
   12072 /*
   12073  * wm_gmii_i82543_readreg:	[mii interface function]
   12074  *
   12075  *	Read a PHY register on the GMII (i82543 version).
   12076  */
   12077 static int
   12078 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12079 {
   12080 	struct wm_softc *sc = device_private(dev);
   12081 
   12082 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12083 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   12084 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   12085 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   12086 
   12087 	DPRINTF(sc, WM_DEBUG_GMII,
   12088 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   12089 		device_xname(dev), phy, reg, *val));
   12090 
   12091 	return 0;
   12092 }
   12093 
   12094 /*
   12095  * wm_gmii_i82543_writereg:	[mii interface function]
   12096  *
   12097  *	Write a PHY register on the GMII (i82543 version).
   12098  */
   12099 static int
   12100 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   12101 {
   12102 	struct wm_softc *sc = device_private(dev);
   12103 
   12104 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   12105 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   12106 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   12107 	    (MII_COMMAND_START << 30), 32);
   12108 
   12109 	return 0;
   12110 }
   12111 
   12112 /*
   12113  * wm_gmii_mdic_readreg:	[mii interface function]
   12114  *
   12115  *	Read a PHY register on the GMII.
   12116  */
   12117 static int
   12118 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12119 {
   12120 	struct wm_softc *sc = device_private(dev);
   12121 	uint32_t mdic = 0;
   12122 	int i;
   12123 
   12124 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12125 	    && (reg > MII_ADDRMASK)) {
   12126 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12127 		    __func__, sc->sc_phytype, reg);
   12128 		reg &= MII_ADDRMASK;
   12129 	}
   12130 
   12131 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   12132 	    MDIC_REGADD(reg));
   12133 
   12134 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12135 		delay(50);
   12136 		mdic = CSR_READ(sc, WMREG_MDIC);
   12137 		if (mdic & MDIC_READY)
   12138 			break;
   12139 	}
   12140 
   12141 	if ((mdic & MDIC_READY) == 0) {
   12142 		DPRINTF(sc, WM_DEBUG_GMII,
   12143 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   12144 			device_xname(dev), phy, reg));
   12145 		return ETIMEDOUT;
   12146 	} else if (mdic & MDIC_E) {
   12147 		/* This is normal if no PHY is present. */
   12148 		DPRINTF(sc, WM_DEBUG_GMII,
   12149 		    ("%s: MDIC read error: phy %d reg %d\n",
   12150 			device_xname(sc->sc_dev), phy, reg));
   12151 		return -1;
   12152 	} else
   12153 		*val = MDIC_DATA(mdic);
   12154 
   12155 	/*
   12156 	 * Allow some time after each MDIC transaction to avoid
   12157 	 * reading duplicate data in the next MDIC transaction.
   12158 	 */
   12159 	if (sc->sc_type == WM_T_PCH2)
   12160 		delay(100);
   12161 
   12162 	return 0;
   12163 }
   12164 
   12165 /*
   12166  * wm_gmii_mdic_writereg:	[mii interface function]
   12167  *
   12168  *	Write a PHY register on the GMII.
   12169  */
   12170 static int
   12171 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   12172 {
   12173 	struct wm_softc *sc = device_private(dev);
   12174 	uint32_t mdic = 0;
   12175 	int i;
   12176 
   12177 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   12178 	    && (reg > MII_ADDRMASK)) {
   12179 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12180 		    __func__, sc->sc_phytype, reg);
   12181 		reg &= MII_ADDRMASK;
   12182 	}
   12183 
   12184 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   12185 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   12186 
   12187 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   12188 		delay(50);
   12189 		mdic = CSR_READ(sc, WMREG_MDIC);
   12190 		if (mdic & MDIC_READY)
   12191 			break;
   12192 	}
   12193 
   12194 	if ((mdic & MDIC_READY) == 0) {
   12195 		DPRINTF(sc, WM_DEBUG_GMII,
   12196 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   12197 			device_xname(dev), phy, reg));
   12198 		return ETIMEDOUT;
   12199 	} else if (mdic & MDIC_E) {
   12200 		DPRINTF(sc, WM_DEBUG_GMII,
   12201 		    ("%s: MDIC write error: phy %d reg %d\n",
   12202 			device_xname(dev), phy, reg));
   12203 		return -1;
   12204 	}
   12205 
   12206 	/*
   12207 	 * Allow some time after each MDIC transaction to avoid
   12208 	 * reading duplicate data in the next MDIC transaction.
   12209 	 */
   12210 	if (sc->sc_type == WM_T_PCH2)
   12211 		delay(100);
   12212 
   12213 	return 0;
   12214 }
   12215 
   12216 /*
   12217  * wm_gmii_i82544_readreg:	[mii interface function]
   12218  *
   12219  *	Read a PHY register on the GMII.
   12220  */
   12221 static int
   12222 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12223 {
   12224 	struct wm_softc *sc = device_private(dev);
   12225 	int rv;
   12226 
   12227 	rv = sc->phy.acquire(sc);
   12228 	if (rv != 0) {
   12229 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12230 		return rv;
   12231 	}
   12232 
   12233 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   12234 
   12235 	sc->phy.release(sc);
   12236 
   12237 	return rv;
   12238 }
   12239 
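/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Like wm_gmii_i82544_readreg(), but the caller must hold the PHY
 *	semaphore. Selects the page first on multi-page IGP PHYs.
 */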
   12240 static int
   12241 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12242 {
   12243 	struct wm_softc *sc = device_private(dev);
   12244 	int rv;
   12245 
   12246 	switch (sc->sc_phytype) {
   12247 	case WMPHY_IGP:
   12248 	case WMPHY_IGP_2:
   12249 	case WMPHY_IGP_3:
   12250 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12251 			rv = wm_gmii_mdic_writereg(dev, phy,
   12252 			    IGPHY_PAGE_SELECT, reg);
   12253 			if (rv != 0)
   12254 				return rv;
   12255 		}
   12256 		break;
   12257 	default:
   12258 #ifdef WM_DEBUG
   12259 		if ((reg >> MII_ADDRBITS) != 0)
   12260 			device_printf(dev,
   12261 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12262 			    __func__, sc->sc_phytype, reg);
   12263 #endif
   12264 		break;
   12265 	}
   12266 
   12267 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12268 }
   12269 
   12270 /*
   12271  * wm_gmii_i82544_writereg:	[mii interface function]
   12272  *
   12273  *	Write a PHY register on the GMII.
   12274  */
   12275 static int
   12276 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   12277 {
   12278 	struct wm_softc *sc = device_private(dev);
   12279 	int rv;
   12280 
   12281 	rv = sc->phy.acquire(sc);
   12282 	if (rv != 0) {
   12283 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12284 		return rv;
   12285 	}
   12286 
   12287 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   12288 	sc->phy.release(sc);
   12289 
   12290 	return rv;
   12291 }
   12292 
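/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	Like wm_gmii_i82544_writereg(), but the caller must hold the PHY
 *	semaphore. Selects the page first on multi-page IGP PHYs.
 */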
   12293 static int
   12294 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12295 {
   12296 	struct wm_softc *sc = device_private(dev);
   12297 	int rv;
   12298 
   12299 	switch (sc->sc_phytype) {
   12300 	case WMPHY_IGP:
   12301 	case WMPHY_IGP_2:
   12302 	case WMPHY_IGP_3:
   12303 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12304 			rv = wm_gmii_mdic_writereg(dev, phy,
   12305 			    IGPHY_PAGE_SELECT, reg);
   12306 			if (rv != 0)
   12307 				return rv;
   12308 		}
   12309 		break;
   12310 	default:
   12311 #ifdef WM_DEBUG
   12312 		if ((reg >> MII_ADDRBITS) != 0)
   12313 			device_printf(dev,
			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
   12315 			    __func__, sc->sc_phytype, reg);
   12316 #endif
   12317 		break;
   12318 	}
   12319 
   12320 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12321 }
   12322 
   12323 /*
   12324  * wm_gmii_i80003_readreg:	[mii interface function]
   12325  *
 *	Read a PHY register on the Kumeran interface.
   12327  * This could be handled by the PHY layer if we didn't have to lock the
   12328  * resource ...
   12329  */
   12330 static int
   12331 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12332 {
   12333 	struct wm_softc *sc = device_private(dev);
   12334 	int page_select;
   12335 	uint16_t temp, temp2;
   12336 	int rv;
   12337 
   12338 	if (phy != 1) /* Only one PHY on kumeran bus */
   12339 		return -1;
   12340 
   12341 	rv = sc->phy.acquire(sc);
   12342 	if (rv != 0) {
   12343 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12344 		return rv;
   12345 	}
   12346 
   12347 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12348 		page_select = GG82563_PHY_PAGE_SELECT;
   12349 	else {
   12350 		/*
   12351 		 * Use Alternative Page Select register to access registers
   12352 		 * 30 and 31.
   12353 		 */
   12354 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12355 	}
   12356 	temp = reg >> GG82563_PAGE_SHIFT;
   12357 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12358 		goto out;
   12359 
   12360 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12361 		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
   12364 		 */
   12365 		delay(200);
   12366 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12367 		if ((rv != 0) || (temp2 != temp)) {
   12368 			device_printf(dev, "%s failed\n", __func__);
   12369 			rv = -1;
   12370 			goto out;
   12371 		}
   12372 		delay(200);
   12373 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12374 		delay(200);
   12375 	} else
   12376 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12377 
   12378 out:
   12379 	sc->phy.release(sc);
   12380 	return rv;
   12381 }
   12382 
   12383 /*
   12384  * wm_gmii_i80003_writereg:	[mii interface function]
   12385  *
 *	Write a PHY register on the Kumeran interface.
   12387  * This could be handled by the PHY layer if we didn't have to lock the
   12388  * resource ...
   12389  */
   12390 static int
   12391 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12392 {
   12393 	struct wm_softc *sc = device_private(dev);
   12394 	int page_select, rv;
   12395 	uint16_t temp, temp2;
   12396 
   12397 	if (phy != 1) /* Only one PHY on kumeran bus */
   12398 		return -1;
   12399 
   12400 	rv = sc->phy.acquire(sc);
   12401 	if (rv != 0) {
   12402 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12403 		return rv;
   12404 	}
   12405 
   12406 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12407 		page_select = GG82563_PHY_PAGE_SELECT;
   12408 	else {
   12409 		/*
   12410 		 * Use Alternative Page Select register to access registers
   12411 		 * 30 and 31.
   12412 		 */
   12413 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12414 	}
   12415 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12416 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12417 		goto out;
   12418 
   12419 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12420 		/*
    12421 		 * Wait an additional 200us to work around a bug in the
    12422 		 * ready bit of the MDIC register.
   12423 		 */
   12424 		delay(200);
   12425 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12426 		if ((rv != 0) || (temp2 != temp)) {
   12427 			device_printf(dev, "%s failed\n", __func__);
   12428 			rv = -1;
   12429 			goto out;
   12430 		}
   12431 		delay(200);
   12432 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12433 		delay(200);
   12434 	} else
   12435 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12436 
   12437 out:
   12438 	sc->phy.release(sc);
   12439 	return rv;
   12440 }
   12441 
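/*
 * Worked example for the two GG82563 access paths above: the register
 * argument carries the page above GG82563_PAGE_SHIFT and the in-page
 * offset below it.  Assuming GG82563_PAGE_SHIFT == 5, a reg of
 * (2 << 5) | 25 selects page 2, offset 25; since 25 is below
 * GG82563_MIN_ALT_REG, the page number goes to GG82563_PHY_PAGE_SELECT,
 * while offsets 30 and 31 must use GG82563_PHY_PAGE_SELECT_ALT.
 */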
   12442 /*
   12443  * wm_gmii_bm_readreg:	[mii interface function]
   12444  *
    12445  *	Read a PHY register on the BM PHY.
   12446  * This could be handled by the PHY layer if we didn't have to lock the
   12447  * resource ...
   12448  */
   12449 static int
   12450 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12451 {
   12452 	struct wm_softc *sc = device_private(dev);
   12453 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12454 	int rv;
   12455 
   12456 	rv = sc->phy.acquire(sc);
   12457 	if (rv != 0) {
   12458 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12459 		return rv;
   12460 	}
   12461 
   12462 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12463 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12464 		    || (reg == 31)) ? 1 : phy;
   12465 	/* Page 800 works differently than the rest so it has its own func */
   12466 	if (page == BM_WUC_PAGE) {
   12467 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12468 		goto release;
   12469 	}
   12470 
   12471 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12472 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12473 		    && (sc->sc_type != WM_T_82583))
   12474 			rv = wm_gmii_mdic_writereg(dev, phy,
   12475 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12476 		else
   12477 			rv = wm_gmii_mdic_writereg(dev, phy,
   12478 			    BME1000_PHY_PAGE_SELECT, page);
   12479 		if (rv != 0)
   12480 			goto release;
   12481 	}
   12482 
   12483 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12484 
   12485 release:
   12486 	sc->phy.release(sc);
   12487 	return rv;
   12488 }
   12489 
   12490 /*
   12491  * wm_gmii_bm_writereg:	[mii interface function]
   12492  *
    12493  *	Write a PHY register on the BM PHY.
   12494  * This could be handled by the PHY layer if we didn't have to lock the
   12495  * resource ...
   12496  */
   12497 static int
   12498 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12499 {
   12500 	struct wm_softc *sc = device_private(dev);
   12501 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12502 	int rv;
   12503 
   12504 	rv = sc->phy.acquire(sc);
   12505 	if (rv != 0) {
   12506 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12507 		return rv;
   12508 	}
   12509 
   12510 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12511 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12512 		    || (reg == 31)) ? 1 : phy;
   12513 	/* Page 800 works differently than the rest so it has its own func */
   12514 	if (page == BM_WUC_PAGE) {
   12515 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12516 		goto release;
   12517 	}
   12518 
   12519 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12520 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12521 		    && (sc->sc_type != WM_T_82583))
   12522 			rv = wm_gmii_mdic_writereg(dev, phy,
   12523 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12524 		else
   12525 			rv = wm_gmii_mdic_writereg(dev, phy,
   12526 			    BME1000_PHY_PAGE_SELECT, page);
   12527 		if (rv != 0)
   12528 			goto release;
   12529 	}
   12530 
   12531 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12532 
   12533 release:
   12534 	sc->phy.release(sc);
   12535 	return rv;
   12536 }
   12537 
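/*
 * Illustrative sketch (not part of the driver): callers of the two BM
 * access paths above encode the page and the in-page offset in one
 * register argument:
 *
 *	reg = (page << BME1000_PAGE_SHIFT) | (offset & MII_ADDRMASK);
 *
 * A page that decodes to BM_WUC_PAGE (800) is diverted to
 * wm_access_phy_wakeup_reg_bm() instead of the plain MDIC path.
 */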
   12538 /*
   12539  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
    12540  *  @dev: device handle
    12541  *  @phy_regp: pointer to storage for the original BM_WUC_ENABLE_REG value
    12542  *
    12543  *  Assumes semaphore already acquired and phy_regp points to a valid memory
    12544  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12545  */
   12546 static int
   12547 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12548 {
   12549 #ifdef WM_DEBUG
   12550 	struct wm_softc *sc = device_private(dev);
   12551 #endif
   12552 	uint16_t temp;
   12553 	int rv;
   12554 
   12555 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12556 		device_xname(dev), __func__));
   12557 
   12558 	if (!phy_regp)
   12559 		return -1;
   12560 
   12561 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12562 
   12563 	/* Select Port Control Registers page */
   12564 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12565 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12566 	if (rv != 0)
   12567 		return rv;
   12568 
   12569 	/* Read WUCE and save it */
   12570 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12571 	if (rv != 0)
   12572 		return rv;
   12573 
   12574 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12575 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12576 	 */
   12577 	temp = *phy_regp;
   12578 	temp |= BM_WUC_ENABLE_BIT;
   12579 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12580 
   12581 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12582 		return rv;
   12583 
   12584 	/* Select Host Wakeup Registers page - caller now able to write
   12585 	 * registers on the Wakeup registers page
   12586 	 */
   12587 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12588 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12589 }
   12590 
   12591 /*
   12592  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
    12593  *  @dev: device handle
    12594  *  @phy_regp: pointer to the original BM_WUC_ENABLE_REG contents
   12595  *
   12596  *  Restore BM_WUC_ENABLE_REG to its original value.
   12597  *
    12598  *  Assumes semaphore already acquired and *phy_regp is the contents of the
   12599  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12600  *  caller.
   12601  */
   12602 static int
   12603 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12604 {
   12605 #ifdef WM_DEBUG
   12606 	struct wm_softc *sc = device_private(dev);
   12607 #endif
   12608 
   12609 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12610 		device_xname(dev), __func__));
   12611 
   12612 	if (!phy_regp)
   12613 		return -1;
   12614 
   12615 	/* Select Port Control Registers page */
   12616 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12617 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12618 
   12619 	/* Restore 769.17 to its original value */
   12620 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12621 
   12622 	return 0;
   12623 }
   12624 
   12625 /*
   12626  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12627  *  @dev: device handle
   12628  *  @offset: register offset to be read or written
   12629  *  @val: pointer to the data to read or write
   12630  *  @rd: determines if operation is read or write
   12631  *  @page_set: BM_WUC_PAGE already set and access enabled
   12632  *
   12633  *  Read the PHY register at offset and store the retrieved information in
   12634  *  data, or write data to PHY register at offset.  Note the procedure to
   12635  *  access the PHY wakeup registers is different than reading the other PHY
   12636  *  registers. It works as such:
   12637  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   12638  *  2) Set page to 800 for host (801 if we were manageability)
   12639  *  3) Write the address using the address opcode (0x11)
   12640  *  4) Read or write the data using the data opcode (0x12)
   12641  *  5) Restore 769.17.2 to its original value
   12642  *
   12643  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12644  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12645  *
   12646  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12647  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12648  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12649  */
   12650 static int
    12651 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12652     bool page_set)
   12653 {
   12654 	struct wm_softc *sc = device_private(dev);
   12655 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12656 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12657 	uint16_t wuce;
   12658 	int rv = 0;
   12659 
   12660 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12661 		device_xname(dev), __func__));
   12662 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12663 	if ((sc->sc_type == WM_T_PCH)
   12664 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12665 		device_printf(dev,
   12666 		    "Attempting to access page %d while gig enabled.\n", page);
   12667 	}
   12668 
   12669 	if (!page_set) {
   12670 		/* Enable access to PHY wakeup registers */
   12671 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12672 		if (rv != 0) {
   12673 			device_printf(dev,
   12674 			    "%s: Could not enable PHY wakeup reg access\n",
   12675 			    __func__);
   12676 			return rv;
   12677 		}
   12678 	}
   12679 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12680 		device_xname(sc->sc_dev), __func__, page, regnum));
   12681 
   12682 	/*
    12683 	 * Steps 3) and 4) of the procedure in the function header:
    12684 	 * write the register address, then access the data.
   12685 	 */
   12686 
   12687 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12688 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12689 	if (rv != 0)
   12690 		return rv;
   12691 
   12692 	if (rd) {
   12693 		/* Read the Wakeup register page value using opcode 0x12 */
   12694 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12695 	} else {
   12696 		/* Write the Wakeup register page value using opcode 0x12 */
   12697 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12698 	}
   12699 	if (rv != 0)
   12700 		return rv;
   12701 
   12702 	if (!page_set)
   12703 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12704 
   12705 	return rv;
   12706 }
   12707 
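/*
 * Minimal usage sketch (not part of the driver), assuming the PHY
 * semaphore is already held and the page is not yet set: read one
 * wakeup-page register with the five-step procedure described above.
 * "regnum" is a hypothetical in-page register number.
 */
#if 0
	uint16_t wuce, data;
	uint16_t regnum = 1;	/* hypothetical wakeup-page register */

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
		/* Steps 3 and 4: address opcode, then data opcode. */
		wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
		wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
		/* Step 5: restore the original BM_WUC_ENABLE_REG value. */
		wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
	}
#endif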
   12708 /*
   12709  * wm_gmii_hv_readreg:	[mii interface function]
   12710  *
    12711  *	Read a PHY register on the HV PHY.
   12712  * This could be handled by the PHY layer if we didn't have to lock the
   12713  * resource ...
   12714  */
   12715 static int
   12716 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12717 {
   12718 	struct wm_softc *sc = device_private(dev);
   12719 	int rv;
   12720 
   12721 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12722 		device_xname(dev), __func__));
   12723 
   12724 	rv = sc->phy.acquire(sc);
   12725 	if (rv != 0) {
   12726 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12727 		return rv;
   12728 	}
   12729 
   12730 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12731 	sc->phy.release(sc);
   12732 	return rv;
   12733 }
   12734 
   12735 static int
   12736 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12737 {
   12738 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12739 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12740 	int rv;
   12741 
   12742 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12743 
   12744 	/* Page 800 works differently than the rest so it has its own func */
   12745 	if (page == BM_WUC_PAGE)
   12746 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12747 
   12748 	/*
    12749 	 * Pages 1 through 767 work differently from the rest and are
    12750 	 * not handled here.
   12751 	 */
   12752 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12753 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12754 		return -1;
   12755 	}
   12756 
   12757 	/*
   12758 	 * XXX I21[789] documents say that the SMBus Address register is at
   12759 	 * PHY address 01, Page 0 (not 768), Register 26.
   12760 	 */
   12761 	if (page == HV_INTC_FC_PAGE_START)
   12762 		page = 0;
   12763 
   12764 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12765 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12766 		    page << BME1000_PAGE_SHIFT);
   12767 		if (rv != 0)
   12768 			return rv;
   12769 	}
   12770 
   12771 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12772 }
   12773 
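/*
 * Worked example for the HV page mapping above: BM_PHY_REG_PAGE() and
 * BM_PHY_REG_NUM() split the register argument into a page and an
 * in-page number.  Any page at or above HV_INTC_FC_PAGE_START (768) is
 * forced to PHY address 1, and page 768 itself is remapped to page 0
 * before the IGPHY_PAGE_SELECT write.
 */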
   12774 /*
   12775  * wm_gmii_hv_writereg:	[mii interface function]
   12776  *
    12777  *	Write a PHY register on the HV PHY.
   12778  * This could be handled by the PHY layer if we didn't have to lock the
   12779  * resource ...
   12780  */
   12781 static int
   12782 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12783 {
   12784 	struct wm_softc *sc = device_private(dev);
   12785 	int rv;
   12786 
   12787 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12788 		device_xname(dev), __func__));
   12789 
   12790 	rv = sc->phy.acquire(sc);
   12791 	if (rv != 0) {
   12792 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12793 		return rv;
   12794 	}
   12795 
   12796 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12797 	sc->phy.release(sc);
   12798 
   12799 	return rv;
   12800 }
   12801 
   12802 static int
   12803 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12804 {
   12805 	struct wm_softc *sc = device_private(dev);
   12806 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12807 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12808 	int rv;
   12809 
   12810 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12811 
   12812 	/* Page 800 works differently than the rest so it has its own func */
   12813 	if (page == BM_WUC_PAGE)
   12814 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12815 		    false);
   12816 
   12817 	/*
    12818 	 * Pages 1 through 767 work differently from the rest and are
    12819 	 * not handled here.
   12820 	 */
   12821 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12822 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12823 		return -1;
   12824 	}
   12825 
   12826 	{
   12827 		/*
   12828 		 * XXX I21[789] documents say that the SMBus Address register
   12829 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12830 		 */
   12831 		if (page == HV_INTC_FC_PAGE_START)
   12832 			page = 0;
   12833 
   12834 		/*
   12835 		 * XXX Workaround MDIO accesses being disabled after entering
   12836 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12837 		 * register is set)
   12838 		 */
   12839 		if (sc->sc_phytype == WMPHY_82578) {
   12840 			struct mii_softc *child;
   12841 
   12842 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12843 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12844 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12845 			    && ((val & (1 << 11)) != 0)) {
   12846 				device_printf(dev, "XXX need workaround\n");
   12847 			}
   12848 		}
   12849 
   12850 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12851 			rv = wm_gmii_mdic_writereg(dev, 1,
   12852 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12853 			if (rv != 0)
   12854 				return rv;
   12855 		}
   12856 	}
   12857 
   12858 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12859 }
   12860 
   12861 /*
   12862  * wm_gmii_82580_readreg:	[mii interface function]
   12863  *
   12864  *	Read a PHY register on the 82580 and I350.
   12865  * This could be handled by the PHY layer if we didn't have to lock the
   12866  * resource ...
   12867  */
   12868 static int
   12869 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12870 {
   12871 	struct wm_softc *sc = device_private(dev);
   12872 	int rv;
   12873 
   12874 	rv = sc->phy.acquire(sc);
   12875 	if (rv != 0) {
   12876 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12877 		return rv;
   12878 	}
   12879 
   12880 #ifdef DIAGNOSTIC
   12881 	if (reg > MII_ADDRMASK) {
   12882 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12883 		    __func__, sc->sc_phytype, reg);
   12884 		reg &= MII_ADDRMASK;
   12885 	}
   12886 #endif
   12887 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12888 
   12889 	sc->phy.release(sc);
   12890 	return rv;
   12891 }
   12892 
   12893 /*
   12894  * wm_gmii_82580_writereg:	[mii interface function]
   12895  *
   12896  *	Write a PHY register on the 82580 and I350.
   12897  * This could be handled by the PHY layer if we didn't have to lock the
   12898  * resource ...
   12899  */
   12900 static int
   12901 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12902 {
   12903 	struct wm_softc *sc = device_private(dev);
   12904 	int rv;
   12905 
   12906 	rv = sc->phy.acquire(sc);
   12907 	if (rv != 0) {
   12908 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12909 		return rv;
   12910 	}
   12911 
   12912 #ifdef DIAGNOSTIC
   12913 	if (reg > MII_ADDRMASK) {
   12914 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12915 		    __func__, sc->sc_phytype, reg);
   12916 		reg &= MII_ADDRMASK;
   12917 	}
   12918 #endif
   12919 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12920 
   12921 	sc->phy.release(sc);
   12922 	return rv;
   12923 }
   12924 
   12925 /*
   12926  * wm_gmii_gs40g_readreg:	[mii interface function]
   12927  *
    12928  *	Read a PHY register on the I210 and I211.
   12929  * This could be handled by the PHY layer if we didn't have to lock the
   12930  * resource ...
   12931  */
   12932 static int
   12933 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12934 {
   12935 	struct wm_softc *sc = device_private(dev);
   12936 	int page, offset;
   12937 	int rv;
   12938 
   12939 	/* Acquire semaphore */
   12940 	rv = sc->phy.acquire(sc);
   12941 	if (rv != 0) {
   12942 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12943 		return rv;
   12944 	}
   12945 
   12946 	/* Page select */
   12947 	page = reg >> GS40G_PAGE_SHIFT;
   12948 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12949 	if (rv != 0)
   12950 		goto release;
   12951 
   12952 	/* Read reg */
   12953 	offset = reg & GS40G_OFFSET_MASK;
   12954 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12955 
   12956 release:
   12957 	sc->phy.release(sc);
   12958 	return rv;
   12959 }
   12960 
   12961 /*
   12962  * wm_gmii_gs40g_writereg:	[mii interface function]
   12963  *
   12964  *	Write a PHY register on the I210 and I211.
   12965  * This could be handled by the PHY layer if we didn't have to lock the
   12966  * resource ...
   12967  */
   12968 static int
   12969 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12970 {
   12971 	struct wm_softc *sc = device_private(dev);
   12972 	uint16_t page;
   12973 	int offset, rv;
   12974 
   12975 	/* Acquire semaphore */
   12976 	rv = sc->phy.acquire(sc);
   12977 	if (rv != 0) {
   12978 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12979 		return rv;
   12980 	}
   12981 
   12982 	/* Page select */
   12983 	page = reg >> GS40G_PAGE_SHIFT;
   12984 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12985 	if (rv != 0)
   12986 		goto release;
   12987 
   12988 	/* Write reg */
   12989 	offset = reg & GS40G_OFFSET_MASK;
   12990 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12991 
   12992 release:
   12993 	/* Release semaphore */
   12994 	sc->phy.release(sc);
   12995 	return rv;
   12996 }
   12997 
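/*
 * Worked example for the two GS40G access paths above: the register
 * argument is split as
 *
 *	page   = reg >> GS40G_PAGE_SHIFT;
 *	offset = reg & GS40G_OFFSET_MASK;
 *
 * so a page-0 register number passes through unchanged, while a
 * register on another page carries that page in the high bits.
 */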
   12998 /*
   12999  * wm_gmii_statchg:	[mii interface function]
   13000  *
   13001  *	Callback from MII layer when media changes.
   13002  */
   13003 static void
   13004 wm_gmii_statchg(struct ifnet *ifp)
   13005 {
   13006 	struct wm_softc *sc = ifp->if_softc;
   13007 	struct mii_data *mii = &sc->sc_mii;
   13008 
   13009 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   13010 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13011 	sc->sc_fcrtl &= ~FCRTL_XONE;
   13012 
   13013 	/* Get flow control negotiation result. */
   13014 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   13015 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   13016 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   13017 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   13018 	}
   13019 
   13020 	if (sc->sc_flowflags & IFM_FLOW) {
   13021 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   13022 			sc->sc_ctrl |= CTRL_TFCE;
   13023 			sc->sc_fcrtl |= FCRTL_XONE;
   13024 		}
   13025 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   13026 			sc->sc_ctrl |= CTRL_RFCE;
   13027 	}
   13028 
   13029 	if (mii->mii_media_active & IFM_FDX) {
   13030 		DPRINTF(sc, WM_DEBUG_LINK,
   13031 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   13032 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13033 	} else {
   13034 		DPRINTF(sc, WM_DEBUG_LINK,
   13035 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   13036 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13037 	}
   13038 
   13039 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13040 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13041 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13042 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13043 	if (sc->sc_type == WM_T_80003) {
   13044 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   13045 		case IFM_1000_T:
   13046 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13047 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    13048 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   13049 			break;
   13050 		default:
   13051 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   13052 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    13053 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   13054 			break;
   13055 		}
   13056 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   13057 	}
   13058 }
   13059 
   13060 /* kumeran related (80003, ICH* and PCH*) */
   13061 
   13062 /*
   13063  * wm_kmrn_readreg:
   13064  *
   13065  *	Read a kumeran register
   13066  */
   13067 static int
   13068 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   13069 {
   13070 	int rv;
   13071 
   13072 	if (sc->sc_type == WM_T_80003)
   13073 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13074 	else
   13075 		rv = sc->phy.acquire(sc);
   13076 	if (rv != 0) {
   13077 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13078 		    __func__);
   13079 		return rv;
   13080 	}
   13081 
   13082 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   13083 
   13084 	if (sc->sc_type == WM_T_80003)
   13085 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13086 	else
   13087 		sc->phy.release(sc);
   13088 
   13089 	return rv;
   13090 }
   13091 
   13092 static int
   13093 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   13094 {
   13095 
   13096 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13097 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   13098 	    KUMCTRLSTA_REN);
   13099 	CSR_WRITE_FLUSH(sc);
   13100 	delay(2);
   13101 
   13102 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   13103 
   13104 	return 0;
   13105 }
   13106 
   13107 /*
   13108  * wm_kmrn_writereg:
   13109  *
   13110  *	Write a kumeran register
   13111  */
   13112 static int
   13113 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   13114 {
   13115 	int rv;
   13116 
   13117 	if (sc->sc_type == WM_T_80003)
   13118 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13119 	else
   13120 		rv = sc->phy.acquire(sc);
   13121 	if (rv != 0) {
   13122 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   13123 		    __func__);
   13124 		return rv;
   13125 	}
   13126 
   13127 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   13128 
   13129 	if (sc->sc_type == WM_T_80003)
   13130 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   13131 	else
   13132 		sc->phy.release(sc);
   13133 
   13134 	return rv;
   13135 }
   13136 
   13137 static int
   13138 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   13139 {
   13140 
   13141 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   13142 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   13143 
   13144 	return 0;
   13145 }
   13146 
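/*
 * Minimal usage sketch (not part of the driver): a Kumeran access is a
 * single KUMCTRLSTA write.  For a read, the offset goes into the
 * KUMCTRLSTA_OFFSET field with KUMCTRLSTA_REN set, and the data comes
 * back in the low 16 bits of the same register.  E.g. reading the
 * half-duplex control word that wm_gmii_statchg() programs:
 */
#if 0
	uint16_t hd_ctrl;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl) == 0)
		device_printf(sc->sc_dev, "HD_CTRL = 0x%04x\n", hd_ctrl);
#endif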
   13147 /*
   13148  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
    13149  * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
   13150  */
   13151 static int
   13152 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   13153 {
   13154 	struct wm_softc *sc = device_private(dev);
   13155 	int rv;
   13156 
   13157 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   13158 	if (rv != 0)
   13159 		return rv;
   13160 
   13161 	if (rd)
   13162 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   13163 	else
   13164 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   13165 	return rv;
   13166 }
   13167 
   13168 static int
   13169 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   13170 {
   13171 
   13172 	return wm_access_emi_reg_locked(dev, reg, val, true);
   13173 }
   13174 
   13175 static int
   13176 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   13177 {
   13178 
   13179 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   13180 }
   13181 
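/*
 * Minimal usage sketch (not part of the driver): EMI registers are
 * reached indirectly through the I82579_EMI_ADDR/I82579_EMI_DATA pair
 * on PHY address 2, so the caller must hold the PHY semaphore around
 * the *_locked call.  "emi_reg" is a hypothetical EMI register offset.
 */
#if 0
	int emi_reg = 0;	/* hypothetical EMI register offset */
	uint16_t data;

	if (sc->phy.acquire(sc) == 0) {
		wm_read_emi_reg_locked(sc->sc_dev, emi_reg, &data);
		sc->phy.release(sc);
	}
#endif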
   13182 /* SGMII related */
   13183 
   13184 /*
   13185  * wm_sgmii_uses_mdio
   13186  *
   13187  * Check whether the transaction is to the internal PHY or the external
   13188  * MDIO interface. Return true if it's MDIO.
   13189  */
   13190 static bool
   13191 wm_sgmii_uses_mdio(struct wm_softc *sc)
   13192 {
   13193 	uint32_t reg;
   13194 	bool ismdio = false;
   13195 
   13196 	switch (sc->sc_type) {
   13197 	case WM_T_82575:
   13198 	case WM_T_82576:
   13199 		reg = CSR_READ(sc, WMREG_MDIC);
   13200 		ismdio = ((reg & MDIC_DEST) != 0);
   13201 		break;
   13202 	case WM_T_82580:
   13203 	case WM_T_I350:
   13204 	case WM_T_I354:
   13205 	case WM_T_I210:
   13206 	case WM_T_I211:
   13207 		reg = CSR_READ(sc, WMREG_MDICNFG);
   13208 		ismdio = ((reg & MDICNFG_DEST) != 0);
   13209 		break;
   13210 	default:
   13211 		break;
   13212 	}
   13213 
   13214 	return ismdio;
   13215 }
   13216 
   13217 /* Setup internal SGMII PHY for SFP */
   13218 static void
   13219 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   13220 {
   13221 	uint16_t id1, id2, phyreg;
   13222 	int i, rv;
   13223 
   13224 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   13225 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   13226 		return;
   13227 
   13228 	for (i = 0; i < MII_NPHY; i++) {
   13229 		sc->phy.no_errprint = true;
   13230 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   13231 		if (rv != 0)
   13232 			continue;
   13233 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   13234 		if (rv != 0)
   13235 			continue;
   13236 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   13237 			continue;
   13238 		sc->phy.no_errprint = false;
   13239 
   13240 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   13241 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   13242 		phyreg |= ESSR_SGMII_WOC_COPPER;
   13243 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   13244 		break;
   13245 	}
   13246 
   13247 }
   13248 
   13249 /*
   13250  * wm_sgmii_readreg:	[mii interface function]
   13251  *
   13252  *	Read a PHY register on the SGMII
   13253  * This could be handled by the PHY layer if we didn't have to lock the
   13254  * resource ...
   13255  */
   13256 static int
   13257 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   13258 {
   13259 	struct wm_softc *sc = device_private(dev);
   13260 	int rv;
   13261 
   13262 	rv = sc->phy.acquire(sc);
   13263 	if (rv != 0) {
   13264 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13265 		return rv;
   13266 	}
   13267 
   13268 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   13269 
   13270 	sc->phy.release(sc);
   13271 	return rv;
   13272 }
   13273 
   13274 static int
   13275 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   13276 {
   13277 	struct wm_softc *sc = device_private(dev);
   13278 	uint32_t i2ccmd;
   13279 	int i, rv = 0;
   13280 
   13281 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13282 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13283 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13284 
   13285 	/* Poll the ready bit */
   13286 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13287 		delay(50);
   13288 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13289 		if (i2ccmd & I2CCMD_READY)
   13290 			break;
   13291 	}
   13292 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13293 		device_printf(dev, "I2CCMD Read did not complete\n");
   13294 		rv = ETIMEDOUT;
   13295 	}
   13296 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13297 		if (!sc->phy.no_errprint)
   13298 			device_printf(dev, "I2CCMD Error bit set\n");
   13299 		rv = EIO;
   13300 	}
   13301 
   13302 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   13303 
   13304 	return rv;
   13305 }
   13306 
   13307 /*
   13308  * wm_sgmii_writereg:	[mii interface function]
   13309  *
   13310  *	Write a PHY register on the SGMII.
   13311  * This could be handled by the PHY layer if we didn't have to lock the
   13312  * resource ...
   13313  */
   13314 static int
   13315 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   13316 {
   13317 	struct wm_softc *sc = device_private(dev);
   13318 	int rv;
   13319 
   13320 	rv = sc->phy.acquire(sc);
   13321 	if (rv != 0) {
   13322 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   13323 		return rv;
   13324 	}
   13325 
   13326 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   13327 
   13328 	sc->phy.release(sc);
   13329 
   13330 	return rv;
   13331 }
   13332 
   13333 static int
   13334 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   13335 {
   13336 	struct wm_softc *sc = device_private(dev);
   13337 	uint32_t i2ccmd;
   13338 	uint16_t swapdata;
   13339 	int rv = 0;
   13340 	int i;
   13341 
   13342 	/* Swap the data bytes for the I2C interface */
   13343 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   13344 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   13345 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   13346 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13347 
   13348 	/* Poll the ready bit */
   13349 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13350 		delay(50);
   13351 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13352 		if (i2ccmd & I2CCMD_READY)
   13353 			break;
   13354 	}
   13355 	if ((i2ccmd & I2CCMD_READY) == 0) {
   13356 		device_printf(dev, "I2CCMD Write did not complete\n");
   13357 		rv = ETIMEDOUT;
   13358 	}
   13359 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   13360 		device_printf(dev, "I2CCMD Error bit set\n");
   13361 		rv = EIO;
   13362 	}
   13363 
   13364 	return rv;
   13365 }
   13366 
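/*
 * Worked example of the byte swap in the two I2C paths above: the
 * I2CCMD data field travels most-significant byte first, so a PHY
 * value of 0x1234 is written as swapdata == 0x3412, and a read that
 * returns 0x3412 in the low 16 bits of I2CCMD is swapped back to
 * 0x1234 before being handed to the caller.
 */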
   13367 /* TBI related */
   13368 
   13369 static bool
   13370 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13371 {
   13372 	bool sig;
   13373 
   13374 	sig = ctrl & CTRL_SWDPIN(1);
   13375 
   13376 	/*
   13377 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13378 	 * detect a signal, 1 if they don't.
   13379 	 */
   13380 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13381 		sig = !sig;
   13382 
   13383 	return sig;
   13384 }
   13385 
   13386 /*
   13387  * wm_tbi_mediainit:
   13388  *
   13389  *	Initialize media for use on 1000BASE-X devices.
   13390  */
   13391 static void
   13392 wm_tbi_mediainit(struct wm_softc *sc)
   13393 {
   13394 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13395 	const char *sep = "";
   13396 
   13397 	if (sc->sc_type < WM_T_82543)
   13398 		sc->sc_tipg = TIPG_WM_DFLT;
   13399 	else
   13400 		sc->sc_tipg = TIPG_LG_DFLT;
   13401 
   13402 	sc->sc_tbi_serdes_anegticks = 5;
   13403 
   13404 	/* Initialize our media structures */
   13405 	sc->sc_mii.mii_ifp = ifp;
   13406 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13407 
   13408 	ifp->if_baudrate = IF_Gbps(1);
   13409 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13410 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13411 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13412 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13413 		    sc->sc_core_lock);
   13414 	} else {
   13415 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13416 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13417 	}
   13418 
   13419 	/*
   13420 	 * SWD Pins:
   13421 	 *
   13422 	 *	0 = Link LED (output)
   13423 	 *	1 = Loss Of Signal (input)
   13424 	 */
   13425 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13426 
   13427 	/* XXX Perhaps this is only for TBI */
   13428 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13429 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13430 
   13431 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13432 		sc->sc_ctrl &= ~CTRL_LRST;
   13433 
   13434 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13435 
   13436 #define	ADD(ss, mm, dd)							  \
   13437 do {									  \
   13438 	aprint_normal("%s%s", sep, ss);					  \
   13439 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13440 	sep = ", ";							  \
   13441 } while (/*CONSTCOND*/0)
   13442 
   13443 	aprint_normal_dev(sc->sc_dev, "");
   13444 
   13445 	if (sc->sc_type == WM_T_I354) {
   13446 		uint32_t status;
   13447 
   13448 		status = CSR_READ(sc, WMREG_STATUS);
   13449 		if (((status & STATUS_2P5_SKU) != 0)
   13450 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    13451 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   13452 		} else
    13453 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   13454 	} else if (sc->sc_type == WM_T_82545) {
   13455 		/* Only 82545 is LX (XXX except SFP) */
   13456 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13457 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13458 	} else if (sc->sc_sfptype != 0) {
   13459 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13460 		switch (sc->sc_sfptype) {
   13461 		default:
   13462 		case SFF_SFP_ETH_FLAGS_1000SX:
   13463 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13464 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13465 			break;
   13466 		case SFF_SFP_ETH_FLAGS_1000LX:
   13467 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13468 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13469 			break;
   13470 		case SFF_SFP_ETH_FLAGS_1000CX:
   13471 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13472 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13473 			break;
   13474 		case SFF_SFP_ETH_FLAGS_1000T:
   13475 			ADD("1000baseT", IFM_1000_T, 0);
   13476 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13477 			break;
   13478 		case SFF_SFP_ETH_FLAGS_100FX:
   13479 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13480 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13481 			break;
   13482 		}
   13483 	} else {
   13484 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13485 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13486 	}
   13487 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13488 	aprint_normal("\n");
   13489 
   13490 #undef ADD
   13491 
   13492 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13493 }
   13494 
   13495 /*
   13496  * wm_tbi_mediachange:	[ifmedia interface function]
   13497  *
   13498  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13499  */
   13500 static int
   13501 wm_tbi_mediachange(struct ifnet *ifp)
   13502 {
   13503 	struct wm_softc *sc = ifp->if_softc;
   13504 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13505 	uint32_t status, ctrl;
   13506 	bool signal;
   13507 	int i;
   13508 
   13509 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13510 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13511 		/* XXX need some work for >= 82571 and < 82575 */
   13512 		if (sc->sc_type < WM_T_82575)
   13513 			return 0;
   13514 	}
   13515 
   13516 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13517 	    || (sc->sc_type >= WM_T_82575))
   13518 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13519 
   13520 	sc->sc_ctrl &= ~CTRL_LRST;
   13521 	sc->sc_txcw = TXCW_ANE;
   13522 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13523 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13524 	else if (ife->ifm_media & IFM_FDX)
   13525 		sc->sc_txcw |= TXCW_FD;
   13526 	else
   13527 		sc->sc_txcw |= TXCW_HD;
   13528 
   13529 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13530 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13531 
    13532 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13533 		device_xname(sc->sc_dev), sc->sc_txcw));
   13534 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13535 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13536 	CSR_WRITE_FLUSH(sc);
   13537 	delay(1000);
   13538 
   13539 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13540 	signal = wm_tbi_havesignal(sc, ctrl);
   13541 
   13542 	DPRINTF(sc, WM_DEBUG_LINK,
   13543 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13544 
   13545 	if (signal) {
   13546 		/* Have signal; wait for the link to come up. */
   13547 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13548 			delay(10000);
   13549 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13550 				break;
   13551 		}
   13552 
   13553 		DPRINTF(sc, WM_DEBUG_LINK,
   13554 		    ("%s: i = %d after waiting for link\n",
   13555 			device_xname(sc->sc_dev), i));
   13556 
   13557 		status = CSR_READ(sc, WMREG_STATUS);
   13558 		DPRINTF(sc, WM_DEBUG_LINK,
   13559 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13560 			__PRIxBIT "\n",
   13561 			device_xname(sc->sc_dev), status, STATUS_LU));
   13562 		if (status & STATUS_LU) {
   13563 			/* Link is up. */
   13564 			DPRINTF(sc, WM_DEBUG_LINK,
   13565 			    ("%s: LINK: set media -> link up %s\n",
   13566 				device_xname(sc->sc_dev),
   13567 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13568 
   13569 			/*
    13570 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    13571 			 * automatically, so refresh sc->sc_ctrl from it.
   13572 			 */
   13573 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13574 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13575 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13576 			if (status & STATUS_FD)
   13577 				sc->sc_tctl |=
   13578 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13579 			else
   13580 				sc->sc_tctl |=
   13581 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13582 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13583 				sc->sc_fcrtl |= FCRTL_XONE;
   13584 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13585 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13586 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13587 			sc->sc_tbi_linkup = 1;
   13588 		} else {
   13589 			if (i == WM_LINKUP_TIMEOUT)
   13590 				wm_check_for_link(sc);
   13591 			/* Link is down. */
   13592 			DPRINTF(sc, WM_DEBUG_LINK,
   13593 			    ("%s: LINK: set media -> link down\n",
   13594 				device_xname(sc->sc_dev)));
   13595 			sc->sc_tbi_linkup = 0;
   13596 		}
   13597 	} else {
   13598 		DPRINTF(sc, WM_DEBUG_LINK,
   13599 		    ("%s: LINK: set media -> no signal\n",
   13600 			device_xname(sc->sc_dev)));
   13601 		sc->sc_tbi_linkup = 0;
   13602 	}
   13603 
   13604 	wm_tbi_serdes_set_linkled(sc);
   13605 
   13606 	return 0;
   13607 }
   13608 
   13609 /*
   13610  * wm_tbi_mediastatus:	[ifmedia interface function]
   13611  *
   13612  *	Get the current interface media status on a 1000BASE-X device.
   13613  */
   13614 static void
   13615 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13616 {
   13617 	struct wm_softc *sc = ifp->if_softc;
   13618 	uint32_t ctrl, status;
   13619 
   13620 	ifmr->ifm_status = IFM_AVALID;
   13621 	ifmr->ifm_active = IFM_ETHER;
   13622 
   13623 	status = CSR_READ(sc, WMREG_STATUS);
   13624 	if ((status & STATUS_LU) == 0) {
   13625 		ifmr->ifm_active |= IFM_NONE;
   13626 		return;
   13627 	}
   13628 
   13629 	ifmr->ifm_status |= IFM_ACTIVE;
   13630 	/* Only 82545 is LX */
   13631 	if (sc->sc_type == WM_T_82545)
   13632 		ifmr->ifm_active |= IFM_1000_LX;
   13633 	else
   13634 		ifmr->ifm_active |= IFM_1000_SX;
   13635 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13636 		ifmr->ifm_active |= IFM_FDX;
   13637 	else
   13638 		ifmr->ifm_active |= IFM_HDX;
   13639 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13640 	if (ctrl & CTRL_RFCE)
   13641 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13642 	if (ctrl & CTRL_TFCE)
   13643 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13644 }
   13645 
   13646 /* XXX TBI only */
   13647 static int
   13648 wm_check_for_link(struct wm_softc *sc)
   13649 {
   13650 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13651 	uint32_t rxcw;
   13652 	uint32_t ctrl;
   13653 	uint32_t status;
   13654 	bool signal;
   13655 
   13656 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13657 		device_xname(sc->sc_dev), __func__));
   13658 
   13659 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13660 		/* XXX need some work for >= 82571 */
   13661 		if (sc->sc_type >= WM_T_82571) {
   13662 			sc->sc_tbi_linkup = 1;
   13663 			return 0;
   13664 		}
   13665 	}
   13666 
   13667 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13668 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13669 	status = CSR_READ(sc, WMREG_STATUS);
   13670 	signal = wm_tbi_havesignal(sc, ctrl);
   13671 
   13672 	DPRINTF(sc, WM_DEBUG_LINK,
   13673 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13674 		device_xname(sc->sc_dev), __func__, signal,
   13675 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13676 
   13677 	/*
   13678 	 * SWDPIN   LU RXCW
   13679 	 *	0    0	  0
   13680 	 *	0    0	  1	(should not happen)
   13681 	 *	0    1	  0	(should not happen)
   13682 	 *	0    1	  1	(should not happen)
   13683 	 *	1    0	  0	Disable autonego and force linkup
   13684 	 *	1    0	  1	got /C/ but not linkup yet
   13685 	 *	1    1	  0	(linkup)
   13686 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13687 	 *
   13688 	 */
   13689 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13690 		DPRINTF(sc, WM_DEBUG_LINK,
   13691 		    ("%s: %s: force linkup and fullduplex\n",
   13692 			device_xname(sc->sc_dev), __func__));
   13693 		sc->sc_tbi_linkup = 0;
   13694 		/* Disable auto-negotiation in the TXCW register */
   13695 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13696 
   13697 		/*
   13698 		 * Force link-up and also force full-duplex.
   13699 		 *
    13700 		 * NOTE: the hardware may have updated TFCE and RFCE in
    13701 		 * CTRL, so base sc->sc_ctrl on the value just read.
   13702 		 */
   13703 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13704 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13705 	} else if (((status & STATUS_LU) != 0)
   13706 	    && ((rxcw & RXCW_C) != 0)
   13707 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13708 		sc->sc_tbi_linkup = 1;
   13709 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13710 			device_xname(sc->sc_dev), __func__));
   13711 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13712 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13713 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13714 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13715 			device_xname(sc->sc_dev), __func__));
   13716 	} else {
   13717 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13718 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13719 			status));
   13720 	}
   13721 
   13722 	return 0;
   13723 }
   13724 
   13725 /*
   13726  * wm_tbi_tick:
   13727  *
   13728  *	Check the link on TBI devices.
   13729  *	This function acts as mii_tick().
   13730  */
   13731 static void
   13732 wm_tbi_tick(struct wm_softc *sc)
   13733 {
   13734 	struct mii_data *mii = &sc->sc_mii;
   13735 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13736 	uint32_t status;
   13737 
   13738 	KASSERT(mutex_owned(sc->sc_core_lock));
   13739 
   13740 	status = CSR_READ(sc, WMREG_STATUS);
   13741 
   13742 	/* XXX is this needed? */
   13743 	(void)CSR_READ(sc, WMREG_RXCW);
   13744 	(void)CSR_READ(sc, WMREG_CTRL);
   13745 
   13746 	/* set link status */
   13747 	if ((status & STATUS_LU) == 0) {
   13748 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13749 			device_xname(sc->sc_dev)));
   13750 		sc->sc_tbi_linkup = 0;
   13751 	} else if (sc->sc_tbi_linkup == 0) {
   13752 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13753 			device_xname(sc->sc_dev),
   13754 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13755 		sc->sc_tbi_linkup = 1;
   13756 		sc->sc_tbi_serdes_ticks = 0;
   13757 	}
   13758 
   13759 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13760 		goto setled;
   13761 
   13762 	if ((status & STATUS_LU) == 0) {
   13763 		sc->sc_tbi_linkup = 0;
   13764 		/* If the timer expired, retry autonegotiation */
   13765 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13766 		    && (++sc->sc_tbi_serdes_ticks
   13767 			>= sc->sc_tbi_serdes_anegticks)) {
   13768 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13769 				device_xname(sc->sc_dev), __func__));
   13770 			sc->sc_tbi_serdes_ticks = 0;
   13771 			/*
   13772 			 * Reset the link, and let autonegotiation do
   13773 			 * its thing
   13774 			 */
   13775 			sc->sc_ctrl |= CTRL_LRST;
   13776 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13777 			CSR_WRITE_FLUSH(sc);
   13778 			delay(1000);
   13779 			sc->sc_ctrl &= ~CTRL_LRST;
   13780 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13781 			CSR_WRITE_FLUSH(sc);
   13782 			delay(1000);
   13783 			CSR_WRITE(sc, WMREG_TXCW,
   13784 			    sc->sc_txcw & ~TXCW_ANE);
   13785 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13786 		}
   13787 	}
   13788 
   13789 setled:
   13790 	wm_tbi_serdes_set_linkled(sc);
   13791 }
   13792 
   13793 /* SERDES related */
   13794 static void
   13795 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13796 {
   13797 	uint32_t reg;
   13798 
   13799 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13800 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13801 		return;
   13802 
   13803 	/* Enable PCS to turn on link */
   13804 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13805 	reg |= PCS_CFG_PCS_EN;
   13806 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13807 
   13808 	/* Power up the laser */
   13809 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13810 	reg &= ~CTRL_EXT_SWDPIN(3);
   13811 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13812 
   13813 	/* Flush the write to verify completion */
   13814 	CSR_WRITE_FLUSH(sc);
   13815 	delay(1000);
   13816 }
   13817 
   13818 static int
   13819 wm_serdes_mediachange(struct ifnet *ifp)
   13820 {
   13821 	struct wm_softc *sc = ifp->if_softc;
   13822 	bool pcs_autoneg = true; /* XXX */
   13823 	uint32_t ctrl_ext, pcs_lctl, reg;
   13824 
   13825 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13826 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13827 		return 0;
   13828 
   13829 	/* XXX Currently, this function is not called on 8257[12] */
   13830 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13831 	    || (sc->sc_type >= WM_T_82575))
   13832 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13833 
   13834 	/* Power on the sfp cage if present */
   13835 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13836 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13837 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13838 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13839 
   13840 	sc->sc_ctrl |= CTRL_SLU;
   13841 
   13842 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13843 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13844 
   13845 		reg = CSR_READ(sc, WMREG_CONNSW);
   13846 		reg |= CONNSW_ENRGSRC;
   13847 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13848 	}
   13849 
   13850 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13851 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13852 	case CTRL_EXT_LINK_MODE_SGMII:
   13853 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13854 		pcs_autoneg = true;
   13855 		/* Autoneg time out should be disabled for SGMII mode */
   13856 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13857 		break;
   13858 	case CTRL_EXT_LINK_MODE_1000KX:
   13859 		pcs_autoneg = false;
   13860 		/* FALLTHROUGH */
   13861 	default:
   13862 		if ((sc->sc_type == WM_T_82575)
   13863 		    || (sc->sc_type == WM_T_82576)) {
   13864 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13865 				pcs_autoneg = false;
   13866 		}
   13867 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13868 		    | CTRL_FRCFDX;
   13869 
   13870 		/* Set speed of 1000/Full if speed/duplex is forced */
   13871 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13872 	}
   13873 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13874 
   13875 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13876 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13877 
   13878 	if (pcs_autoneg) {
   13879 		/* Set PCS register for autoneg */
   13880 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13881 
   13882 		/* Disable force flow control for autoneg */
   13883 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13884 
   13885 		/* Configure flow control advertisement for autoneg */
   13886 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13887 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13888 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13889 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13890 	} else
   13891 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13892 
   13893 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13894 
   13895 	return 0;
   13896 }
   13897 
   13898 static void
   13899 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13900 {
   13901 	struct wm_softc *sc = ifp->if_softc;
   13902 	struct mii_data *mii = &sc->sc_mii;
   13903 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13904 	uint32_t pcs_adv, pcs_lpab, reg;
   13905 
   13906 	ifmr->ifm_status = IFM_AVALID;
   13907 	ifmr->ifm_active = IFM_ETHER;
   13908 
   13909 	/* Check PCS */
   13910 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13911 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13912 		ifmr->ifm_active |= IFM_NONE;
   13913 		sc->sc_tbi_linkup = 0;
   13914 		goto setled;
   13915 	}
   13916 
   13917 	sc->sc_tbi_linkup = 1;
   13918 	ifmr->ifm_status |= IFM_ACTIVE;
   13919 	if (sc->sc_type == WM_T_I354) {
   13920 		uint32_t status;
   13921 
   13922 		status = CSR_READ(sc, WMREG_STATUS);
   13923 		if (((status & STATUS_2P5_SKU) != 0)
   13924 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13925 			ifmr->ifm_active |= IFM_2500_KX;
   13926 		} else
   13927 			ifmr->ifm_active |= IFM_1000_KX;
   13928 	} else {
   13929 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13930 		case PCS_LSTS_SPEED_10:
   13931 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13932 			break;
   13933 		case PCS_LSTS_SPEED_100:
   13934 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13935 			break;
   13936 		case PCS_LSTS_SPEED_1000:
   13937 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13938 			break;
   13939 		default:
   13940 			device_printf(sc->sc_dev, "Unknown speed\n");
   13941 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13942 			break;
   13943 		}
   13944 	}
   13945 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13946 	if ((reg & PCS_LSTS_FDX) != 0)
   13947 		ifmr->ifm_active |= IFM_FDX;
   13948 	else
   13949 		ifmr->ifm_active |= IFM_HDX;
   13950 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13951 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13952 		/* Check flow */
   13953 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13954 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13955 			DPRINTF(sc, WM_DEBUG_LINK,
   13956 			    ("XXX LINKOK but not ACOMP\n"));
   13957 			goto setled;
   13958 		}
   13959 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13960 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13961 		DPRINTF(sc, WM_DEBUG_LINK,
   13962 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13963 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13964 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13965 			mii->mii_media_active |= IFM_FLOW
   13966 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13967 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13968 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13969 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13970 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13971 			mii->mii_media_active |= IFM_FLOW
   13972 			    | IFM_ETH_TXPAUSE;
   13973 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13974 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13975 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13976 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13977 			mii->mii_media_active |= IFM_FLOW
   13978 			    | IFM_ETH_RXPAUSE;
   13979 		}
   13980 	}
   13981 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13982 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13983 setled:
   13984 	wm_tbi_serdes_set_linkled(sc);
   13985 }
   13986 
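/*
 * Pause resolution reference for the flow-control checks above (per
 * IEEE 802.3 annex 28B):
 *
 *	local SYM ASYM	partner SYM ASYM	resolved flow control
 *	       1    x		   1    x	TX and RX pause
 *	       0    1		   1    1	TX pause only
 *	       1    1		   0    1	RX pause only
 */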
   13987 /*
   13988  * wm_serdes_tick:
   13989  *
   13990  *	Check the link on serdes devices.
   13991  */
   13992 static void
   13993 wm_serdes_tick(struct wm_softc *sc)
   13994 {
   13995 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13996 	struct mii_data *mii = &sc->sc_mii;
   13997 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13998 	uint32_t reg;
   13999 
   14000 	KASSERT(mutex_owned(sc->sc_core_lock));
   14001 
   14002 	mii->mii_media_status = IFM_AVALID;
   14003 	mii->mii_media_active = IFM_ETHER;
   14004 
   14005 	/* Check PCS */
   14006 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   14007 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   14008 		mii->mii_media_status |= IFM_ACTIVE;
   14009 		sc->sc_tbi_linkup = 1;
   14010 		sc->sc_tbi_serdes_ticks = 0;
   14011 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   14012 		if ((reg & PCS_LSTS_FDX) != 0)
   14013 			mii->mii_media_active |= IFM_FDX;
   14014 		else
   14015 			mii->mii_media_active |= IFM_HDX;
   14016 	} else {
    14017 		mii->mii_media_active |= IFM_NONE;
   14018 		sc->sc_tbi_linkup = 0;
   14019 		/* If the timer expired, retry autonegotiation */
   14020 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   14021 		    && (++sc->sc_tbi_serdes_ticks
   14022 			>= sc->sc_tbi_serdes_anegticks)) {
   14023 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   14024 				device_xname(sc->sc_dev), __func__));
   14025 			sc->sc_tbi_serdes_ticks = 0;
   14026 			/* XXX */
   14027 			wm_serdes_mediachange(ifp);
   14028 		}
   14029 	}
   14030 
   14031 	wm_tbi_serdes_set_linkled(sc);
   14032 }
   14033 
   14034 /* SFP related */
   14035 
   14036 static int
   14037 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   14038 {
   14039 	uint32_t i2ccmd;
   14040 	int i;
   14041 
   14042 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   14043 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   14044 
   14045 	/* Poll the ready bit */
   14046 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   14047 		delay(50);
   14048 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   14049 		if (i2ccmd & I2CCMD_READY)
   14050 			break;
   14051 	}
   14052 	if ((i2ccmd & I2CCMD_READY) == 0)
   14053 		return -1;
   14054 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   14055 		return -1;
   14056 
   14057 	*data = i2ccmd & 0x00ff;
   14058 
   14059 	return 0;
   14060 }
   14061 
   14062 static uint32_t
   14063 wm_sfp_get_media_type(struct wm_softc *sc)
   14064 {
   14065 	uint32_t ctrl_ext;
   14066 	uint8_t val = 0;
   14067 	int timeout = 3;
   14068 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   14069 	int rv = -1;
   14070 
   14071 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14072 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   14073 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   14074 	CSR_WRITE_FLUSH(sc);
   14075 
   14076 	/* Read SFP module data */
   14077 	while (timeout) {
   14078 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   14079 		if (rv == 0)
   14080 			break;
   14081 		delay(100*1000); /* XXX too big */
   14082 		timeout--;
   14083 	}
   14084 	if (rv != 0)
   14085 		goto out;
   14086 
   14087 	switch (val) {
   14088 	case SFF_SFP_ID_SFF:
   14089 		aprint_normal_dev(sc->sc_dev,
   14090 		    "Module/Connector soldered to board\n");
   14091 		break;
   14092 	case SFF_SFP_ID_SFP:
   14093 		sc->sc_flags |= WM_F_SFP;
   14094 		break;
   14095 	case SFF_SFP_ID_UNKNOWN:
   14096 		goto out;
   14097 	default:
   14098 		break;
   14099 	}
   14100 
   14101 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   14102 	if (rv != 0)
   14103 		goto out;
   14104 
   14105 	sc->sc_sfptype = val;
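          	/*
          	 * Decode the Ethernet compliance code byte (per SFF-8472):
          	 * 1000BASE-SX/LX maps to SerDes media, 1000BASE-T to SGMII
          	 * copper, and 100BASE-FX to SGMII with SerDes media.
          	 */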
   14106 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   14107 		mediatype = WM_MEDIATYPE_SERDES;
   14108 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   14109 		sc->sc_flags |= WM_F_SGMII;
   14110 		mediatype = WM_MEDIATYPE_COPPER;
   14111 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   14112 		sc->sc_flags |= WM_F_SGMII;
   14113 		mediatype = WM_MEDIATYPE_SERDES;
   14114 	} else {
   14115 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   14116 		    __func__, sc->sc_sfptype);
   14117 		sc->sc_sfptype = 0; /* XXX unknown */
   14118 	}
   14119 
   14120 out:
   14121 	/* Restore I2C interface setting */
   14122 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14123 
   14124 	return mediatype;
   14125 }
   14126 
   14127 /*
   14128  * NVM related.
   14129  * Microwire, SPI (w/wo EERD) and Flash.
   14130  */
   14131 
    14132 /* Both SPI and Microwire */
   14133 
   14134 /*
   14135  * wm_eeprom_sendbits:
   14136  *
   14137  *	Send a series of bits to the EEPROM.
   14138  */
   14139 static void
   14140 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   14141 {
   14142 	uint32_t reg;
   14143 	int x;
   14144 
   14145 	reg = CSR_READ(sc, WMREG_EECD);
   14146 
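          	/*
          	 * Shift the bits out MSB first: present each bit on DI and
          	 * pulse SK so the EEPROM samples it on the rising edge.
          	 */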
   14147 	for (x = nbits; x > 0; x--) {
   14148 		if (bits & (1U << (x - 1)))
   14149 			reg |= EECD_DI;
   14150 		else
   14151 			reg &= ~EECD_DI;
   14152 		CSR_WRITE(sc, WMREG_EECD, reg);
   14153 		CSR_WRITE_FLUSH(sc);
   14154 		delay(2);
   14155 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14156 		CSR_WRITE_FLUSH(sc);
   14157 		delay(2);
   14158 		CSR_WRITE(sc, WMREG_EECD, reg);
   14159 		CSR_WRITE_FLUSH(sc);
   14160 		delay(2);
   14161 	}
   14162 }
   14163 
   14164 /*
   14165  * wm_eeprom_recvbits:
   14166  *
   14167  *	Receive a series of bits from the EEPROM.
   14168  */
   14169 static void
   14170 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   14171 {
   14172 	uint32_t reg, val;
   14173 	int x;
   14174 
   14175 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   14176 
   14177 	val = 0;
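          	/*
          	 * Shift the bits in MSB first: raise SK, sample DO while the
          	 * clock is high, then lower SK for the next bit.
          	 */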
   14178 	for (x = nbits; x > 0; x--) {
   14179 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   14180 		CSR_WRITE_FLUSH(sc);
   14181 		delay(2);
   14182 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   14183 			val |= (1U << (x - 1));
   14184 		CSR_WRITE(sc, WMREG_EECD, reg);
   14185 		CSR_WRITE_FLUSH(sc);
   14186 		delay(2);
   14187 	}
   14188 	*valp = val;
   14189 }
   14190 
   14191 /* Microwire */
   14192 
   14193 /*
   14194  * wm_nvm_read_uwire:
   14195  *
   14196  *	Read a word from the EEPROM using the MicroWire protocol.
   14197  */
   14198 static int
   14199 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14200 {
   14201 	uint32_t reg, val;
   14202 	int i, rv;
   14203 
   14204 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14205 		device_xname(sc->sc_dev), __func__));
   14206 
   14207 	rv = sc->nvm.acquire(sc);
   14208 	if (rv != 0)
   14209 		return rv;
   14210 
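          	/*
          	 * Each Microwire read asserts CS, shifts out the 3-bit READ
          	 * opcode and the word address, then clocks in 16 data bits.
          	 */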
   14211 	for (i = 0; i < wordcnt; i++) {
   14212 		/* Clear SK and DI. */
   14213 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   14214 		CSR_WRITE(sc, WMREG_EECD, reg);
   14215 
    14216 		/*
    14217 		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
    14218 		 * and in Xen.
    14219 		 *
    14220 		 * We apply this workaround only to the 82540 because
    14221 		 * qemu's e1000 emulation acts as an 82540.
    14222 		 */
   14223 		if (sc->sc_type == WM_T_82540) {
   14224 			reg |= EECD_SK;
   14225 			CSR_WRITE(sc, WMREG_EECD, reg);
   14226 			reg &= ~EECD_SK;
   14227 			CSR_WRITE(sc, WMREG_EECD, reg);
   14228 			CSR_WRITE_FLUSH(sc);
   14229 			delay(2);
   14230 		}
   14231 		/* XXX: end of workaround */
   14232 
   14233 		/* Set CHIP SELECT. */
   14234 		reg |= EECD_CS;
   14235 		CSR_WRITE(sc, WMREG_EECD, reg);
   14236 		CSR_WRITE_FLUSH(sc);
   14237 		delay(2);
   14238 
   14239 		/* Shift in the READ command. */
   14240 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   14241 
   14242 		/* Shift in address. */
   14243 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   14244 
   14245 		/* Shift out the data. */
   14246 		wm_eeprom_recvbits(sc, &val, 16);
   14247 		data[i] = val & 0xffff;
   14248 
   14249 		/* Clear CHIP SELECT. */
   14250 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   14251 		CSR_WRITE(sc, WMREG_EECD, reg);
   14252 		CSR_WRITE_FLUSH(sc);
   14253 		delay(2);
   14254 	}
   14255 
   14256 	sc->nvm.release(sc);
   14257 	return 0;
   14258 }
   14259 
   14260 /* SPI */
   14261 
   14262 /*
   14263  * Set SPI and FLASH related information from the EECD register.
   14264  * For 82541 and 82547, the word size is taken from EEPROM.
   14265  */
   14266 static int
   14267 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   14268 {
   14269 	int size;
   14270 	uint32_t reg;
   14271 	uint16_t data;
   14272 
   14273 	reg = CSR_READ(sc, WMREG_EECD);
   14274 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   14275 
   14276 	/* Read the size of NVM from EECD by default */
   14277 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14278 	switch (sc->sc_type) {
   14279 	case WM_T_82541:
   14280 	case WM_T_82541_2:
   14281 	case WM_T_82547:
   14282 	case WM_T_82547_2:
   14283 		/* Set dummy value to access EEPROM */
   14284 		sc->sc_nvm_wordsize = 64;
   14285 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   14286 			aprint_error_dev(sc->sc_dev,
   14287 			    "%s: failed to read EEPROM size\n", __func__);
   14288 		}
   14289 		reg = data;
   14290 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   14291 		if (size == 0)
    14292 			size = 6; /* 64 words (1 << 6) */
   14293 		else
   14294 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   14295 		break;
   14296 	case WM_T_80003:
   14297 	case WM_T_82571:
   14298 	case WM_T_82572:
   14299 	case WM_T_82573: /* SPI case */
   14300 	case WM_T_82574: /* SPI case */
   14301 	case WM_T_82583: /* SPI case */
   14302 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14303 		if (size > 14)
   14304 			size = 14;
   14305 		break;
   14306 	case WM_T_82575:
   14307 	case WM_T_82576:
   14308 	case WM_T_82580:
   14309 	case WM_T_I350:
   14310 	case WM_T_I354:
   14311 	case WM_T_I210:
   14312 	case WM_T_I211:
   14313 		size += NVM_WORD_SIZE_BASE_SHIFT;
   14314 		if (size > 15)
   14315 			size = 15;
   14316 		break;
   14317 	default:
   14318 		aprint_error_dev(sc->sc_dev,
   14319 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    14320 		return -1;
   14322 	}
   14323 
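          	/* EECD_EE_SIZE_EX encodes the word count as a power of two */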
   14324 	sc->sc_nvm_wordsize = 1 << size;
   14325 
   14326 	return 0;
   14327 }
   14328 
   14329 /*
   14330  * wm_nvm_ready_spi:
   14331  *
   14332  *	Wait for a SPI EEPROM to be ready for commands.
   14333  */
   14334 static int
   14335 wm_nvm_ready_spi(struct wm_softc *sc)
   14336 {
   14337 	uint32_t val;
   14338 	int usec;
   14339 
   14340 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14341 		device_xname(sc->sc_dev), __func__));
   14342 
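          	/*
          	 * Poll the status register: shift out the RDSR opcode and
          	 * read one byte back until the busy (RDY) bit clears or we
          	 * time out.
          	 */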
   14343 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   14344 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   14345 		wm_eeprom_recvbits(sc, &val, 8);
   14346 		if ((val & SPI_SR_RDY) == 0)
   14347 			break;
   14348 	}
   14349 	if (usec >= SPI_MAX_RETRIES) {
    14350 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   14351 		return -1;
   14352 	}
   14353 	return 0;
   14354 }
   14355 
   14356 /*
   14357  * wm_nvm_read_spi:
   14358  *
    14359  *	Read a word from the EEPROM using the SPI protocol.
   14360  */
   14361 static int
   14362 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14363 {
   14364 	uint32_t reg, val;
   14365 	int i;
   14366 	uint8_t opc;
   14367 	int rv;
   14368 
   14369 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14370 		device_xname(sc->sc_dev), __func__));
   14371 
   14372 	rv = sc->nvm.acquire(sc);
   14373 	if (rv != 0)
   14374 		return rv;
   14375 
   14376 	/* Clear SK and CS. */
   14377 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14378 	CSR_WRITE(sc, WMREG_EECD, reg);
   14379 	CSR_WRITE_FLUSH(sc);
   14380 	delay(2);
   14381 
   14382 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14383 		goto out;
   14384 
   14385 	/* Toggle CS to flush commands. */
   14386 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14387 	CSR_WRITE_FLUSH(sc);
   14388 	delay(2);
   14389 	CSR_WRITE(sc, WMREG_EECD, reg);
   14390 	CSR_WRITE_FLUSH(sc);
   14391 	delay(2);
   14392 
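          	/*
          	 * Parts with 8 address bits carry the ninth address bit
          	 * (words 128 and up, i.e. byte address 256 and up) in the
          	 * opcode's A8 bit; the word index is doubled below to form
          	 * a byte address.
          	 */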
   14393 	opc = SPI_OPC_READ;
   14394 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14395 		opc |= SPI_OPC_A8;
   14396 
   14397 	wm_eeprom_sendbits(sc, opc, 8);
   14398 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14399 
   14400 	for (i = 0; i < wordcnt; i++) {
   14401 		wm_eeprom_recvbits(sc, &val, 16);
   14402 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14403 	}
   14404 
   14405 	/* Raise CS and clear SK. */
   14406 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14407 	CSR_WRITE(sc, WMREG_EECD, reg);
   14408 	CSR_WRITE_FLUSH(sc);
   14409 	delay(2);
   14410 
   14411 out:
   14412 	sc->nvm.release(sc);
   14413 	return rv;
   14414 }
   14415 
    14416 /* Reading via the EERD register */
   14417 
   14418 static int
   14419 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14420 {
   14421 	uint32_t attempts = 100000;
   14422 	uint32_t i, reg = 0;
   14423 	int32_t done = -1;
   14424 
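          	/* Poll for DONE in 5us steps, up to ~0.5s in total */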
   14425 	for (i = 0; i < attempts; i++) {
   14426 		reg = CSR_READ(sc, rw);
   14427 
   14428 		if (reg & EERD_DONE) {
   14429 			done = 0;
   14430 			break;
   14431 		}
   14432 		delay(5);
   14433 	}
   14434 
   14435 	return done;
   14436 }
   14437 
   14438 static int
   14439 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14440 {
   14441 	int i, eerd = 0;
   14442 	int rv;
   14443 
   14444 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14445 		device_xname(sc->sc_dev), __func__));
   14446 
   14447 	rv = sc->nvm.acquire(sc);
   14448 	if (rv != 0)
   14449 		return rv;
   14450 
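          	/*
          	 * For each word: write the address and the START bit to EERD,
          	 * wait for DONE, then pull the data field out of the register.
          	 */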
   14451 	for (i = 0; i < wordcnt; i++) {
   14452 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14453 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14454 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14455 		if (rv != 0) {
    14456 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    14457 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   14458 			break;
   14459 		}
   14460 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14461 	}
   14462 
   14463 	sc->nvm.release(sc);
   14464 	return rv;
   14465 }
   14466 
   14467 /* Flash */
   14468 
   14469 static int
   14470 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14471 {
   14472 	uint32_t eecd;
   14473 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14474 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14475 	uint32_t nvm_dword = 0;
   14476 	uint8_t sig_byte = 0;
   14477 	int rv;
   14478 
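          	/*
          	 * Each bank stores a signature word; a bank is usable when
          	 * its signature bits match ICH_NVM_SIG_VALUE.  Check bank 0
          	 * first and fall back to bank 1.
          	 */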
   14479 	switch (sc->sc_type) {
   14480 	case WM_T_PCH_SPT:
   14481 	case WM_T_PCH_CNP:
   14482 	case WM_T_PCH_TGP:
   14483 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14484 		act_offset = ICH_NVM_SIG_WORD * 2;
   14485 
   14486 		/* Set bank to 0 in case flash read fails. */
   14487 		*bank = 0;
   14488 
   14489 		/* Check bank 0 */
   14490 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14491 		if (rv != 0)
   14492 			return rv;
   14493 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14494 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14495 			*bank = 0;
   14496 			return 0;
   14497 		}
   14498 
   14499 		/* Check bank 1 */
    14500 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    14501 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    14502 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14503 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14504 			*bank = 1;
   14505 			return 0;
   14506 		}
   14507 		aprint_error_dev(sc->sc_dev,
   14508 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14509 		return -1;
   14510 	case WM_T_ICH8:
   14511 	case WM_T_ICH9:
   14512 		eecd = CSR_READ(sc, WMREG_EECD);
   14513 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14514 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14515 			return 0;
   14516 		}
   14517 		/* FALLTHROUGH */
   14518 	default:
   14519 		/* Default to 0 */
   14520 		*bank = 0;
   14521 
   14522 		/* Check bank 0 */
   14523 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14524 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14525 			*bank = 0;
   14526 			return 0;
   14527 		}
   14528 
   14529 		/* Check bank 1 */
   14530 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14531 		    &sig_byte);
   14532 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14533 			*bank = 1;
   14534 			return 0;
   14535 		}
   14536 	}
   14537 
   14538 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14539 		device_xname(sc->sc_dev)));
   14540 	return -1;
   14541 }
   14542 
   14543 /******************************************************************************
   14544  * This function does initial flash setup so that a new read/write/erase cycle
   14545  * can be started.
   14546  *
   14547  * sc - The pointer to the hw structure
   14548  ****************************************************************************/
   14549 static int32_t
   14550 wm_ich8_cycle_init(struct wm_softc *sc)
   14551 {
   14552 	uint16_t hsfsts;
   14553 	int32_t error = 1;
   14554 	int32_t i     = 0;
   14555 
   14556 	if (sc->sc_type >= WM_T_PCH_SPT)
   14557 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14558 	else
   14559 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14560 
    14561 	/* Check the Flash Descriptor Valid bit in the HW status register */
   14562 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14563 		return error;
   14564 
    14565 	/* Clear FCERR and DAEL in the HW status by writing 1 to them */
   14567 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14568 
   14569 	if (sc->sc_type >= WM_T_PCH_SPT)
   14570 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14571 	else
   14572 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14573 
    14574 	/*
    14575 	 * Either we should have a hardware SPI cycle-in-progress bit
    14576 	 * to check before starting a new cycle, or the FDONE bit should
    14577 	 * be changed in the hardware so that it reads 1 after a hardware
    14578 	 * reset, which could then indicate whether a cycle is in
    14579 	 * progress or has completed.  We should also have a software
    14580 	 * semaphore mechanism guarding FDONE or the cycle-in-progress
    14581 	 * bit so that accesses by two threads are serialized, and so
    14582 	 * that two threads cannot start a cycle at the same time.
    14583 	 */
   14584 
   14585 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14586 		/*
   14587 		 * There is no cycle running at present, so we can start a
   14588 		 * cycle
   14589 		 */
   14590 
   14591 		/* Begin by setting Flash Cycle Done. */
   14592 		hsfsts |= HSFSTS_DONE;
   14593 		if (sc->sc_type >= WM_T_PCH_SPT)
   14594 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14595 			    hsfsts & 0xffffUL);
   14596 		else
   14597 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14598 		error = 0;
   14599 	} else {
   14600 		/*
   14601 		 * Otherwise poll for sometime so the current cycle has a
   14602 		 * chance to end before giving up.
   14603 		 */
   14604 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14605 			if (sc->sc_type >= WM_T_PCH_SPT)
   14606 				hsfsts = ICH8_FLASH_READ32(sc,
   14607 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14608 			else
   14609 				hsfsts = ICH8_FLASH_READ16(sc,
   14610 				    ICH_FLASH_HSFSTS);
   14611 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14612 				error = 0;
   14613 				break;
   14614 			}
   14615 			delay(1);
   14616 		}
   14617 		if (error == 0) {
   14618 			/*
   14619 			 * Successful in waiting for previous cycle to timeout,
   14620 			 * now set the Flash Cycle Done.
   14621 			 */
   14622 			hsfsts |= HSFSTS_DONE;
   14623 			if (sc->sc_type >= WM_T_PCH_SPT)
   14624 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14625 				    hsfsts & 0xffffUL);
   14626 			else
   14627 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14628 				    hsfsts);
   14629 		}
   14630 	}
   14631 	return error;
   14632 }
   14633 
   14634 /******************************************************************************
   14635  * This function starts a flash cycle and waits for its completion
   14636  *
   14637  * sc - The pointer to the hw structure
   14638  ****************************************************************************/
   14639 static int32_t
   14640 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14641 {
   14642 	uint16_t hsflctl;
   14643 	uint16_t hsfsts;
   14644 	int32_t error = 1;
   14645 	uint32_t i = 0;
   14646 
   14647 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14648 	if (sc->sc_type >= WM_T_PCH_SPT)
   14649 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14650 	else
   14651 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14652 	hsflctl |= HSFCTL_GO;
   14653 	if (sc->sc_type >= WM_T_PCH_SPT)
   14654 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14655 		    (uint32_t)hsflctl << 16);
   14656 	else
   14657 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14658 
   14659 	/* Wait till FDONE bit is set to 1 */
   14660 	do {
   14661 		if (sc->sc_type >= WM_T_PCH_SPT)
   14662 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14663 			    & 0xffffUL;
   14664 		else
   14665 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14666 		if (hsfsts & HSFSTS_DONE)
   14667 			break;
   14668 		delay(1);
   14669 		i++;
   14670 	} while (i < timeout);
    14671 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14672 		error = 0;
   14673 
   14674 	return error;
   14675 }
   14676 
   14677 /******************************************************************************
   14678  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14679  *
   14680  * sc - The pointer to the hw structure
   14681  * index - The index of the byte or word to read.
    14682  * size - Size of data to read: 1=byte, 2=word, 4=dword
   14683  * data - Pointer to the word to store the value read.
   14684  *****************************************************************************/
   14685 static int32_t
   14686 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14687     uint32_t size, uint32_t *data)
   14688 {
   14689 	uint16_t hsfsts;
   14690 	uint16_t hsflctl;
   14691 	uint32_t flash_linear_address;
   14692 	uint32_t flash_data = 0;
   14693 	int32_t error = 1;
   14694 	int32_t count = 0;
   14695 
    14696 	if (size < 1 || size > 4 || data == NULL ||
   14697 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14698 		return error;
   14699 
   14700 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14701 	    sc->sc_ich8_flash_base;
   14702 
   14703 	do {
   14704 		delay(1);
   14705 		/* Steps */
   14706 		error = wm_ich8_cycle_init(sc);
   14707 		if (error)
   14708 			break;
   14709 
   14710 		if (sc->sc_type >= WM_T_PCH_SPT)
   14711 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14712 			    >> 16;
   14713 		else
   14714 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14715 		/* BCOUNT holds size - 1: 0/1/3 for 1-, 2- and 4-byte cycles */
   14716 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14717 		    & HSFCTL_BCOUNT_MASK;
   14718 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14719 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14720 			/*
    14721 			 * On SPT, this register is in LAN memory space, not
    14722 			 * flash.  Therefore, only 32-bit access is supported.
   14723 			 */
   14724 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14725 			    (uint32_t)hsflctl << 16);
   14726 		} else
   14727 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14728 
   14729 		/*
   14730 		 * Write the last 24 bits of index into Flash Linear address
   14731 		 * field in Flash Address
   14732 		 */
   14733 		/* TODO: TBD maybe check the index against the size of flash */
   14734 
   14735 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14736 
   14737 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14738 
    14739 		/*
    14740 		 * Check if FCERR is set to 1; if so, clear it and retry
    14741 		 * the whole sequence a few more times.  Otherwise read the
    14742 		 * data out of Flash Data0, least significant byte first.
    14743 		 */
   14745 		if (error == 0) {
   14746 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14747 			if (size == 1)
   14748 				*data = (uint8_t)(flash_data & 0x000000FF);
   14749 			else if (size == 2)
   14750 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14751 			else if (size == 4)
   14752 				*data = (uint32_t)flash_data;
   14753 			break;
   14754 		} else {
   14755 			/*
   14756 			 * If we've gotten here, then things are probably
   14757 			 * completely hosed, but if the error condition is
   14758 			 * detected, it won't hurt to give it another try...
   14759 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14760 			 */
   14761 			if (sc->sc_type >= WM_T_PCH_SPT)
   14762 				hsfsts = ICH8_FLASH_READ32(sc,
   14763 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14764 			else
   14765 				hsfsts = ICH8_FLASH_READ16(sc,
   14766 				    ICH_FLASH_HSFSTS);
   14767 
   14768 			if (hsfsts & HSFSTS_ERR) {
   14769 				/* Repeat for some time before giving up. */
   14770 				continue;
   14771 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14772 				break;
   14773 		}
   14774 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14775 
   14776 	return error;
   14777 }
   14778 
   14779 /******************************************************************************
   14780  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14781  *
   14782  * sc - pointer to wm_hw structure
   14783  * index - The index of the byte to read.
   14784  * data - Pointer to a byte to store the value read.
   14785  *****************************************************************************/
   14786 static int32_t
   14787 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14788 {
   14789 	int32_t status;
   14790 	uint32_t word = 0;
   14791 
   14792 	status = wm_read_ich8_data(sc, index, 1, &word);
   14793 	if (status == 0)
   14794 		*data = (uint8_t)word;
   14795 	else
   14796 		*data = 0;
   14797 
   14798 	return status;
   14799 }
   14800 
   14801 /******************************************************************************
   14802  * Reads a word from the NVM using the ICH8 flash access registers.
   14803  *
   14804  * sc - pointer to wm_hw structure
   14805  * index - The starting byte index of the word to read.
   14806  * data - Pointer to a word to store the value read.
   14807  *****************************************************************************/
   14808 static int32_t
   14809 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14810 {
   14811 	int32_t status;
   14812 	uint32_t word = 0;
   14813 
   14814 	status = wm_read_ich8_data(sc, index, 2, &word);
   14815 	if (status == 0)
   14816 		*data = (uint16_t)word;
   14817 	else
   14818 		*data = 0;
   14819 
   14820 	return status;
   14821 }
   14822 
   14823 /******************************************************************************
   14824  * Reads a dword from the NVM using the ICH8 flash access registers.
   14825  *
   14826  * sc - pointer to wm_hw structure
   14827  * index - The starting byte index of the word to read.
   14828  * data - Pointer to a word to store the value read.
   14829  *****************************************************************************/
   14830 static int32_t
   14831 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14832 {
   14833 	int32_t status;
   14834 
   14835 	status = wm_read_ich8_data(sc, index, 4, data);
   14836 	return status;
   14837 }
   14838 
   14839 /******************************************************************************
   14840  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14841  * register.
   14842  *
   14843  * sc - Struct containing variables accessed by shared code
   14844  * offset - offset of word in the EEPROM to read
   14845  * data - word read from the EEPROM
   14846  * words - number of words to read
   14847  *****************************************************************************/
   14848 static int
   14849 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14850 {
   14851 	int rv;
   14852 	uint32_t flash_bank = 0;
   14853 	uint32_t act_offset = 0;
   14854 	uint32_t bank_offset = 0;
   14855 	uint16_t word = 0;
   14856 	uint16_t i = 0;
   14857 
   14858 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14859 		device_xname(sc->sc_dev), __func__));
   14860 
   14861 	rv = sc->nvm.acquire(sc);
   14862 	if (rv != 0)
   14863 		return rv;
   14864 
   14865 	/*
   14866 	 * We need to know which is the valid flash bank.  In the event
   14867 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14868 	 * managing flash_bank. So it cannot be trusted and needs
   14869 	 * to be updated with each read.
   14870 	 */
   14871 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14872 	if (rv) {
   14873 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14874 			device_xname(sc->sc_dev)));
   14875 		flash_bank = 0;
   14876 	}
   14877 
   14878 	/*
   14879 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14880 	 * size
   14881 	 */
   14882 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14883 
   14884 	for (i = 0; i < words; i++) {
   14885 		/* The NVM part needs a byte offset, hence * 2 */
   14886 		act_offset = bank_offset + ((offset + i) * 2);
   14887 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14888 		if (rv) {
   14889 			aprint_error_dev(sc->sc_dev,
   14890 			    "%s: failed to read NVM\n", __func__);
   14891 			break;
   14892 		}
   14893 		data[i] = word;
   14894 	}
   14895 
   14896 	sc->nvm.release(sc);
   14897 	return rv;
   14898 }
   14899 
   14900 /******************************************************************************
   14901  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14902  * register.
   14903  *
   14904  * sc - Struct containing variables accessed by shared code
   14905  * offset - offset of word in the EEPROM to read
   14906  * data - word read from the EEPROM
   14907  * words - number of words to read
   14908  *****************************************************************************/
   14909 static int
   14910 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14911 {
   14912 	int	 rv;
   14913 	uint32_t flash_bank = 0;
   14914 	uint32_t act_offset = 0;
   14915 	uint32_t bank_offset = 0;
   14916 	uint32_t dword = 0;
   14917 	uint16_t i = 0;
   14918 
   14919 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14920 		device_xname(sc->sc_dev), __func__));
   14921 
   14922 	rv = sc->nvm.acquire(sc);
   14923 	if (rv != 0)
   14924 		return rv;
   14925 
   14926 	/*
   14927 	 * We need to know which is the valid flash bank.  In the event
   14928 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14929 	 * managing flash_bank. So it cannot be trusted and needs
   14930 	 * to be updated with each read.
   14931 	 */
   14932 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14933 	if (rv) {
   14934 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14935 			device_xname(sc->sc_dev)));
   14936 		flash_bank = 0;
   14937 	}
   14938 
   14939 	/*
   14940 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14941 	 * size
   14942 	 */
   14943 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14944 
   14945 	for (i = 0; i < words; i++) {
   14946 		/* The NVM part needs a byte offset, hence * 2 */
   14947 		act_offset = bank_offset + ((offset + i) * 2);
   14948 		/* but we must read dword aligned, so mask ... */
   14949 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14950 		if (rv) {
   14951 			aprint_error_dev(sc->sc_dev,
   14952 			    "%s: failed to read NVM\n", __func__);
   14953 			break;
   14954 		}
   14955 		/* ... and pick out low or high word */
   14956 		if ((act_offset & 0x2) == 0)
   14957 			data[i] = (uint16_t)(dword & 0xFFFF);
   14958 		else
   14959 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14960 	}
   14961 
   14962 	sc->nvm.release(sc);
   14963 	return rv;
   14964 }
   14965 
   14966 /* iNVM */
   14967 
   14968 static int
   14969 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14970 {
    14971 	int32_t	 rv = -1;
   14972 	uint32_t invm_dword;
   14973 	uint16_t i;
   14974 	uint8_t record_type, word_address;
   14975 
   14976 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14977 		device_xname(sc->sc_dev), __func__));
   14978 
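          	/*
          	 * Walk the iNVM records in order: skip over CSR-autoload and
          	 * RSA-key records, stop at the first uninitialized record,
          	 * and return the payload of a word-autoload record whose
          	 * address matches.
          	 */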
   14979 	for (i = 0; i < INVM_SIZE; i++) {
   14980 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14981 		/* Get record type */
   14982 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14983 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14984 			break;
   14985 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14986 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14987 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14988 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14989 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14990 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14991 			if (word_address == address) {
   14992 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14993 				rv = 0;
   14994 				break;
   14995 			}
   14996 		}
   14997 	}
   14998 
   14999 	return rv;
   15000 }
   15001 
   15002 static int
   15003 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   15004 {
   15005 	int i, rv;
   15006 
   15007 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15008 		device_xname(sc->sc_dev), __func__));
   15009 
   15010 	rv = sc->nvm.acquire(sc);
   15011 	if (rv != 0)
   15012 		return rv;
   15013 
   15014 	for (i = 0; i < words; i++) {
   15015 		switch (offset + i) {
   15016 		case NVM_OFF_MACADDR:
   15017 		case NVM_OFF_MACADDR1:
   15018 		case NVM_OFF_MACADDR2:
   15019 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   15020 			if (rv != 0) {
   15021 				data[i] = 0xffff;
   15022 				rv = -1;
   15023 			}
   15024 			break;
   15025 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   15026 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15027 			if (rv != 0) {
   15028 				*data = INVM_DEFAULT_AL;
   15029 				rv = 0;
   15030 			}
   15031 			break;
   15032 		case NVM_OFF_CFG2:
   15033 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15034 			if (rv != 0) {
   15035 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   15036 				rv = 0;
   15037 			}
   15038 			break;
   15039 		case NVM_OFF_CFG4:
   15040 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15041 			if (rv != 0) {
   15042 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   15043 				rv = 0;
   15044 			}
   15045 			break;
   15046 		case NVM_OFF_LED_1_CFG:
   15047 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15048 			if (rv != 0) {
   15049 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   15050 				rv = 0;
   15051 			}
   15052 			break;
   15053 		case NVM_OFF_LED_0_2_CFG:
   15054 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15055 			if (rv != 0) {
   15056 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   15057 				rv = 0;
   15058 			}
   15059 			break;
   15060 		case NVM_OFF_ID_LED_SETTINGS:
   15061 			rv = wm_nvm_read_word_invm(sc, offset, data);
   15062 			if (rv != 0) {
   15063 				*data = ID_LED_RESERVED_FFFF;
   15064 				rv = 0;
   15065 			}
   15066 			break;
   15067 		default:
   15068 			DPRINTF(sc, WM_DEBUG_NVM,
   15069 			    ("NVM word 0x%02x is not mapped.\n", offset));
   15070 			*data = NVM_RESERVED_WORD;
   15071 			break;
   15072 		}
   15073 	}
   15074 
   15075 	sc->nvm.release(sc);
   15076 	return rv;
   15077 }
   15078 
   15079 /* Lock, detecting NVM type, validate checksum, version and read */
   15080 
   15081 static int
   15082 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   15083 {
   15084 	uint32_t eecd = 0;
   15085 
   15086 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   15087 	    || sc->sc_type == WM_T_82583) {
   15088 		eecd = CSR_READ(sc, WMREG_EECD);
   15089 
   15090 		/* Isolate bits 15 & 16 */
   15091 		eecd = ((eecd >> 15) & 0x03);
   15092 
   15093 		/* If both bits are set, device is Flash type */
   15094 		if (eecd == 0x03)
   15095 			return 0;
   15096 	}
   15097 	return 1;
   15098 }
   15099 
   15100 static int
   15101 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   15102 {
   15103 	uint32_t eec;
   15104 
   15105 	eec = CSR_READ(sc, WMREG_EEC);
   15106 	if ((eec & EEC_FLASH_DETECTED) != 0)
   15107 		return 1;
   15108 
   15109 	return 0;
   15110 }
   15111 
   15112 /*
   15113  * wm_nvm_validate_checksum
   15114  *
    15115  * The sum of the first 64 16-bit words must equal NVM_CHECKSUM.
   15116  */
   15117 static int
   15118 wm_nvm_validate_checksum(struct wm_softc *sc)
   15119 {
   15120 	uint16_t checksum;
   15121 	uint16_t eeprom_data;
   15122 #ifdef WM_DEBUG
   15123 	uint16_t csum_wordaddr, valid_checksum;
   15124 #endif
   15125 	int i;
   15126 
   15127 	checksum = 0;
   15128 
   15129 	/* Don't check for I211 */
   15130 	if (sc->sc_type == WM_T_I211)
   15131 		return 0;
   15132 
   15133 #ifdef WM_DEBUG
   15134 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
   15135 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
   15136 		csum_wordaddr = NVM_OFF_COMPAT;
   15137 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   15138 	} else {
   15139 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   15140 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   15141 	}
   15142 
   15143 	/* Dump EEPROM image for debug */
   15144 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15145 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15146 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   15147 		/* XXX PCH_SPT? */
   15148 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   15149 		if ((eeprom_data & valid_checksum) == 0)
   15150 			DPRINTF(sc, WM_DEBUG_NVM,
   15151 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   15152 				device_xname(sc->sc_dev), eeprom_data,
   15153 				valid_checksum));
   15154 	}
   15155 
   15156 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   15157 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   15158 		for (i = 0; i < NVM_SIZE; i++) {
   15159 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15160 				printf("XXXX ");
   15161 			else
   15162 				printf("%04hx ", eeprom_data);
   15163 			if (i % 8 == 7)
   15164 				printf("\n");
   15165 		}
   15166 	}
   15167 
   15168 #endif /* WM_DEBUG */
   15169 
   15170 	for (i = 0; i < NVM_SIZE; i++) {
   15171 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   15172 			return -1;
   15173 		checksum += eeprom_data;
   15174 	}
   15175 
   15176 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   15177 #ifdef WM_DEBUG
   15178 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   15179 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   15180 #endif
   15181 	}
   15182 
   15183 	return 0;
   15184 }
   15185 
   15186 static void
   15187 wm_nvm_version_invm(struct wm_softc *sc)
   15188 {
   15189 	uint32_t dword;
   15190 
   15191 	/*
    15192 	 * Linux's code to decode the version is very strange, so we
    15193 	 * don't follow that algorithm and just use word 61 as the
    15194 	 * documentation describes.  Perhaps it's not perfect though...
   15195 	 *
   15196 	 * Example:
   15197 	 *
   15198 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   15199 	 */
   15200 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   15201 	dword = __SHIFTOUT(dword, INVM_VER_1);
   15202 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   15203 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   15204 }
   15205 
   15206 static void
   15207 wm_nvm_version(struct wm_softc *sc)
   15208 {
   15209 	uint16_t major, minor, build, patch;
   15210 	uint16_t uid0, uid1;
   15211 	uint16_t nvm_data;
   15212 	uint16_t off;
   15213 	bool check_version = false;
   15214 	bool check_optionrom = false;
   15215 	bool have_build = false;
   15216 	bool have_uid = true;
   15217 
   15218 	/*
   15219 	 * Version format:
   15220 	 *
   15221 	 * XYYZ
   15222 	 * X0YZ
   15223 	 * X0YY
   15224 	 *
   15225 	 * Example:
   15226 	 *
   15227 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   15228 	 *	82571	0x50a6	5.10.6?
   15229 	 *	82572	0x506a	5.6.10?
   15230 	 *	82572EI	0x5069	5.6.9?
   15231 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   15232 	 *		0x2013	2.1.3?
   15233 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   15234 	 * ICH8+82567	0x0040	0.4.0?
   15235 	 * ICH9+82566	0x1040	1.4.0?
   15236 	 *ICH10+82567	0x0043	0.4.3?
   15237 	 *  PCH+82577	0x00c1	0.12.1?
   15238 	 * PCH2+82579	0x00d3	0.13.3?
   15239 	 *		0x00d4	0.13.4?
   15240 	 *  LPT+I218	0x0023	0.2.3?
   15241 	 *  SPT+I219	0x0084	0.8.4?
   15242 	 *  CNP+I219	0x0054	0.5.4?
   15243 	 */
   15244 
   15245 	/*
   15246 	 * XXX
   15247 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   15248 	 * I've never seen real 82574 hardware with such small SPI ROM.
   15249 	 */
   15250 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   15251 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   15252 		have_uid = false;
   15253 
   15254 	switch (sc->sc_type) {
   15255 	case WM_T_82571:
   15256 	case WM_T_82572:
   15257 	case WM_T_82574:
   15258 	case WM_T_82583:
   15259 		check_version = true;
   15260 		check_optionrom = true;
   15261 		have_build = true;
   15262 		break;
   15263 	case WM_T_ICH8:
   15264 	case WM_T_ICH9:
   15265 	case WM_T_ICH10:
   15266 	case WM_T_PCH:
   15267 	case WM_T_PCH2:
   15268 	case WM_T_PCH_LPT:
   15269 	case WM_T_PCH_SPT:
   15270 	case WM_T_PCH_CNP:
   15271 	case WM_T_PCH_TGP:
   15272 		check_version = true;
   15273 		have_build = true;
   15274 		have_uid = false;
   15275 		break;
   15276 	case WM_T_82575:
   15277 	case WM_T_82576:
   15278 	case WM_T_82580:
   15279 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   15280 			check_version = true;
   15281 		break;
   15282 	case WM_T_I211:
   15283 		wm_nvm_version_invm(sc);
   15284 		have_uid = false;
   15285 		goto printver;
   15286 	case WM_T_I210:
   15287 		if (!wm_nvm_flash_presence_i210(sc)) {
   15288 			wm_nvm_version_invm(sc);
   15289 			have_uid = false;
   15290 			goto printver;
   15291 		}
   15292 		/* FALLTHROUGH */
   15293 	case WM_T_I350:
   15294 	case WM_T_I354:
   15295 		check_version = true;
   15296 		check_optionrom = true;
   15297 		break;
   15298 	default:
   15299 		return;
   15300 	}
   15301 	if (check_version
   15302 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   15303 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   15304 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   15305 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   15306 			build = nvm_data & NVM_BUILD_MASK;
   15307 			have_build = true;
   15308 		} else
   15309 			minor = nvm_data & 0x00ff;
   15310 
    15311 		/* Convert the BCD-style minor (e.g. 0x10 -> 10) to decimal */
   15312 		minor = (minor / 16) * 10 + (minor % 16);
   15313 		sc->sc_nvm_ver_major = major;
   15314 		sc->sc_nvm_ver_minor = minor;
   15315 
   15316 printver:
   15317 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   15318 		    sc->sc_nvm_ver_minor);
   15319 		if (have_build) {
   15320 			sc->sc_nvm_ver_build = build;
   15321 			aprint_verbose(".%d", build);
   15322 		}
   15323 	}
   15324 
    15325 	/* Assume the Option ROM area is above NVM_SIZE */
   15326 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   15327 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   15328 		/* Option ROM Version */
   15329 		if ((off != 0x0000) && (off != 0xffff)) {
   15330 			int rv;
   15331 
   15332 			off += NVM_COMBO_VER_OFF;
   15333 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   15334 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   15335 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   15336 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   15337 				/* 16bits */
   15338 				major = uid0 >> 8;
   15339 				build = (uid0 << 8) | (uid1 >> 8);
   15340 				patch = uid1 & 0x00ff;
   15341 				aprint_verbose(", option ROM Version %d.%d.%d",
   15342 				    major, build, patch);
   15343 			}
   15344 		}
   15345 	}
   15346 
   15347 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   15348 		aprint_verbose(", Image Unique ID %08x",
   15349 		    ((uint32_t)uid1 << 16) | uid0);
   15350 }
   15351 
   15352 /*
   15353  * wm_nvm_read:
   15354  *
   15355  *	Read data from the serial EEPROM.
   15356  */
   15357 static int
   15358 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   15359 {
   15360 	int rv;
   15361 
   15362 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   15363 		device_xname(sc->sc_dev), __func__));
   15364 
   15365 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   15366 		return -1;
   15367 
   15368 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15369 
   15370 	return rv;
   15371 }
   15372 
   15373 /*
   15374  * Hardware semaphores.
    15375  * Very complex...
   15376  */
   15377 
   15378 static int
   15379 wm_get_null(struct wm_softc *sc)
   15380 {
   15381 
   15382 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15383 		device_xname(sc->sc_dev), __func__));
   15384 	return 0;
   15385 }
   15386 
   15387 static void
   15388 wm_put_null(struct wm_softc *sc)
   15389 {
   15390 
   15391 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15392 		device_xname(sc->sc_dev), __func__));
   15393 	return;
   15394 }
   15395 
   15396 static int
   15397 wm_get_eecd(struct wm_softc *sc)
   15398 {
   15399 	uint32_t reg;
   15400 	int x;
   15401 
   15402 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15403 		device_xname(sc->sc_dev), __func__));
   15404 
   15405 	reg = CSR_READ(sc, WMREG_EECD);
   15406 
   15407 	/* Request EEPROM access. */
   15408 	reg |= EECD_EE_REQ;
   15409 	CSR_WRITE(sc, WMREG_EECD, reg);
   15410 
   15411 	/* ..and wait for it to be granted. */
   15412 	for (x = 0; x < 1000; x++) {
   15413 		reg = CSR_READ(sc, WMREG_EECD);
   15414 		if (reg & EECD_EE_GNT)
   15415 			break;
   15416 		delay(5);
   15417 	}
   15418 	if ((reg & EECD_EE_GNT) == 0) {
   15419 		aprint_error_dev(sc->sc_dev,
   15420 		    "could not acquire EEPROM GNT\n");
   15421 		reg &= ~EECD_EE_REQ;
   15422 		CSR_WRITE(sc, WMREG_EECD, reg);
   15423 		return -1;
   15424 	}
   15425 
   15426 	return 0;
   15427 }
   15428 
   15429 static void
   15430 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15431 {
   15432 
   15433 	*eecd |= EECD_SK;
   15434 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15435 	CSR_WRITE_FLUSH(sc);
   15436 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15437 		delay(1);
   15438 	else
   15439 		delay(50);
   15440 }
   15441 
   15442 static void
   15443 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15444 {
   15445 
   15446 	*eecd &= ~EECD_SK;
   15447 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15448 	CSR_WRITE_FLUSH(sc);
   15449 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15450 		delay(1);
   15451 	else
   15452 		delay(50);
   15453 }
   15454 
   15455 static void
   15456 wm_put_eecd(struct wm_softc *sc)
   15457 {
   15458 	uint32_t reg;
   15459 
   15460 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15461 		device_xname(sc->sc_dev), __func__));
   15462 
   15463 	/* Stop nvm */
   15464 	reg = CSR_READ(sc, WMREG_EECD);
   15465 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15466 		/* Pull CS high */
   15467 		reg |= EECD_CS;
   15468 		wm_nvm_eec_clock_lower(sc, &reg);
   15469 	} else {
   15470 		/* CS on Microwire is active-high */
   15471 		reg &= ~(EECD_CS | EECD_DI);
   15472 		CSR_WRITE(sc, WMREG_EECD, reg);
   15473 		wm_nvm_eec_clock_raise(sc, &reg);
   15474 		wm_nvm_eec_clock_lower(sc, &reg);
   15475 	}
   15476 
   15477 	reg = CSR_READ(sc, WMREG_EECD);
   15478 	reg &= ~EECD_EE_REQ;
   15479 	CSR_WRITE(sc, WMREG_EECD, reg);
   15480 
   15481 	return;
   15482 }
   15483 
   15484 /*
   15485  * Get hardware semaphore.
   15486  * Same as e1000_get_hw_semaphore_generic()
   15487  */
   15488 static int
   15489 wm_get_swsm_semaphore(struct wm_softc *sc)
   15490 {
   15491 	int32_t timeout;
   15492 	uint32_t swsm;
   15493 
   15494 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15495 		device_xname(sc->sc_dev), __func__));
   15496 	KASSERT(sc->sc_nvm_wordsize > 0);
   15497 
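          	/*
          	 * Acquisition is two-stage: wait for the hardware-owned SMBI
          	 * bit to clear, then set SWESMBI and read it back to confirm
          	 * software ownership against firmware.
          	 */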
   15498 retry:
   15499 	/* Get the SW semaphore. */
   15500 	timeout = sc->sc_nvm_wordsize + 1;
   15501 	while (timeout) {
   15502 		swsm = CSR_READ(sc, WMREG_SWSM);
   15503 
   15504 		if ((swsm & SWSM_SMBI) == 0)
   15505 			break;
   15506 
   15507 		delay(50);
   15508 		timeout--;
   15509 	}
   15510 
   15511 	if (timeout == 0) {
   15512 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15513 			/*
   15514 			 * In rare circumstances, the SW semaphore may already
   15515 			 * be held unintentionally. Clear the semaphore once
   15516 			 * before giving up.
   15517 			 */
   15518 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15519 			wm_put_swsm_semaphore(sc);
   15520 			goto retry;
   15521 		}
   15522 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15523 		return -1;
   15524 	}
   15525 
   15526 	/* Get the FW semaphore. */
   15527 	timeout = sc->sc_nvm_wordsize + 1;
   15528 	while (timeout) {
   15529 		swsm = CSR_READ(sc, WMREG_SWSM);
   15530 		swsm |= SWSM_SWESMBI;
   15531 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15532 		/* If we managed to set the bit we got the semaphore. */
   15533 		swsm = CSR_READ(sc, WMREG_SWSM);
   15534 		if (swsm & SWSM_SWESMBI)
   15535 			break;
   15536 
   15537 		delay(50);
   15538 		timeout--;
   15539 	}
   15540 
   15541 	if (timeout == 0) {
   15542 		aprint_error_dev(sc->sc_dev,
   15543 		    "could not acquire SWSM SWESMBI\n");
   15544 		/* Release semaphores */
   15545 		wm_put_swsm_semaphore(sc);
   15546 		return -1;
   15547 	}
   15548 	return 0;
   15549 }
   15550 
   15551 /*
   15552  * Put hardware semaphore.
   15553  * Same as e1000_put_hw_semaphore_generic()
   15554  */
   15555 static void
   15556 wm_put_swsm_semaphore(struct wm_softc *sc)
   15557 {
   15558 	uint32_t swsm;
   15559 
   15560 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15561 		device_xname(sc->sc_dev), __func__));
   15562 
   15563 	swsm = CSR_READ(sc, WMREG_SWSM);
   15564 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15565 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15566 }
   15567 
   15568 /*
   15569  * Get SW/FW semaphore.
   15570  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15571  */
   15572 static int
   15573 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15574 {
   15575 	uint32_t swfw_sync;
   15576 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15577 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15578 	int timeout;
   15579 
   15580 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15581 		device_xname(sc->sc_dev), __func__));
   15582 
   15583 	if (sc->sc_type == WM_T_80003)
   15584 		timeout = 50;
   15585 	else
   15586 		timeout = 200;
   15587 
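          	/*
          	 * SW_FW_SYNC holds one software and one firmware bit per
          	 * resource; the resource is free only when both bits for the
          	 * requested mask are clear.
          	 */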
   15588 	while (timeout) {
   15589 		if (wm_get_swsm_semaphore(sc)) {
   15590 			aprint_error_dev(sc->sc_dev,
   15591 			    "%s: failed to get semaphore\n",
   15592 			    __func__);
   15593 			return -1;
   15594 		}
   15595 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15596 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15597 			swfw_sync |= swmask;
   15598 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15599 			wm_put_swsm_semaphore(sc);
   15600 			return 0;
   15601 		}
   15602 		wm_put_swsm_semaphore(sc);
   15603 		delay(5000);
   15604 		timeout--;
   15605 	}
   15606 	device_printf(sc->sc_dev,
   15607 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15608 	    mask, swfw_sync);
   15609 	return -1;
   15610 }
   15611 
   15612 static void
   15613 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15614 {
   15615 	uint32_t swfw_sync;
   15616 
   15617 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15618 		device_xname(sc->sc_dev), __func__));
   15619 
   15620 	while (wm_get_swsm_semaphore(sc) != 0)
   15621 		continue;
   15622 
   15623 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15624 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15625 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15626 
   15627 	wm_put_swsm_semaphore(sc);
   15628 }
   15629 
   15630 static int
   15631 wm_get_nvm_80003(struct wm_softc *sc)
   15632 {
   15633 	int rv;
   15634 
   15635 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15636 		device_xname(sc->sc_dev), __func__));
   15637 
   15638 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15639 		aprint_error_dev(sc->sc_dev,
   15640 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15641 		return rv;
   15642 	}
   15643 
   15644 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15645 	    && (rv = wm_get_eecd(sc)) != 0) {
   15646 		aprint_error_dev(sc->sc_dev,
   15647 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15648 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15649 		return rv;
   15650 	}
   15651 
   15652 	return 0;
   15653 }
   15654 
   15655 static void
   15656 wm_put_nvm_80003(struct wm_softc *sc)
   15657 {
   15658 
   15659 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15660 		device_xname(sc->sc_dev), __func__));
   15661 
   15662 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15663 		wm_put_eecd(sc);
   15664 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15665 }
   15666 
   15667 static int
   15668 wm_get_nvm_82571(struct wm_softc *sc)
   15669 {
   15670 	int rv;
   15671 
   15672 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15673 		device_xname(sc->sc_dev), __func__));
   15674 
   15675 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15676 		return rv;
   15677 
   15678 	switch (sc->sc_type) {
   15679 	case WM_T_82573:
   15680 		break;
   15681 	default:
   15682 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15683 			rv = wm_get_eecd(sc);
   15684 		break;
   15685 	}
   15686 
   15687 	if (rv != 0) {
   15688 		aprint_error_dev(sc->sc_dev,
   15689 		    "%s: failed to get semaphore\n",
   15690 		    __func__);
   15691 		wm_put_swsm_semaphore(sc);
   15692 	}
   15693 
   15694 	return rv;
   15695 }
   15696 
   15697 static void
   15698 wm_put_nvm_82571(struct wm_softc *sc)
   15699 {
   15700 
   15701 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15702 		device_xname(sc->sc_dev), __func__));
   15703 
   15704 	switch (sc->sc_type) {
   15705 	case WM_T_82573:
   15706 		break;
   15707 	default:
   15708 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15709 			wm_put_eecd(sc);
   15710 		break;
   15711 	}
   15712 
   15713 	wm_put_swsm_semaphore(sc);
   15714 }
   15715 
   15716 static int
   15717 wm_get_phy_82575(struct wm_softc *sc)
   15718 {
   15719 
   15720 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15721 		device_xname(sc->sc_dev), __func__));
   15722 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15723 }
   15724 
   15725 static void
   15726 wm_put_phy_82575(struct wm_softc *sc)
   15727 {
   15728 
   15729 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15730 		device_xname(sc->sc_dev), __func__));
   15731 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15732 }
   15733 
   15734 static int
   15735 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15736 {
   15737 	uint32_t ext_ctrl;
    15738 	int timeout;
   15739 
   15740 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15741 		device_xname(sc->sc_dev), __func__));
   15742 
   15743 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15744 	for (timeout = 0; timeout < 200; timeout++) {
   15745 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15746 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15747 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15748 
   15749 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15750 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15751 			return 0;
   15752 		delay(5000);
   15753 	}
   15754 	device_printf(sc->sc_dev,
   15755 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15756 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15757 	return -1;
   15758 }
   15759 
   15760 static void
   15761 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15762 {
   15763 	uint32_t ext_ctrl;
   15764 
   15765 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15766 		device_xname(sc->sc_dev), __func__));
   15767 
   15768 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15769 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15770 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15771 
   15772 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15773 }
   15774 
   15775 static int
   15776 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15777 {
   15778 	uint32_t ext_ctrl;
   15779 	int timeout;
   15780 
   15781 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15782 		device_xname(sc->sc_dev), __func__));
   15783 	mutex_enter(sc->sc_ich_phymtx);
   15784 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15785 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15786 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15787 			break;
   15788 		delay(1000);
   15789 	}
   15790 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15791 		device_printf(sc->sc_dev,
   15792 		    "SW has already locked the resource\n");
   15793 		goto out;
   15794 	}
   15795 
   15796 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15797 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15798 	for (timeout = 0; timeout < 1000; timeout++) {
   15799 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15800 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15801 			break;
   15802 		delay(1000);
   15803 	}
   15804 	if (timeout >= 1000) {
   15805 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15806 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15807 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15808 		goto out;
   15809 	}
   15810 	return 0;
   15811 
   15812 out:
   15813 	mutex_exit(sc->sc_ich_phymtx);
   15814 	return -1;
   15815 }
   15816 
   15817 static void
   15818 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15819 {
   15820 	uint32_t ext_ctrl;
   15821 
   15822 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15823 		device_xname(sc->sc_dev), __func__));
   15824 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15825 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15826 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15827 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15828 	} else
   15829 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15830 
   15831 	mutex_exit(sc->sc_ich_phymtx);
   15832 }
   15833 
   15834 static int
   15835 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15836 {
   15837 
   15838 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15839 		device_xname(sc->sc_dev), __func__));
   15840 	mutex_enter(sc->sc_ich_nvmmtx);
   15841 
   15842 	return 0;
   15843 }
   15844 
   15845 static void
   15846 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15847 {
   15848 
   15849 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15850 		device_xname(sc->sc_dev), __func__));
   15851 	mutex_exit(sc->sc_ich_nvmmtx);
   15852 }
   15853 
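          /*
           * 82573 variant: poll for MDIO ownership in 2ms steps, up to
           * WM_MDIO_OWNERSHIP_TIMEOUT tries.  On timeout the semaphore is
           * released again so that a half-taken lock is not left behind.
           */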
   15854 static int
   15855 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15856 {
   15857 	int i = 0;
   15858 	uint32_t reg;
   15859 
   15860 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15861 		device_xname(sc->sc_dev), __func__));
   15862 
   15863 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15864 	do {
   15865 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15866 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15867 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15868 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15869 			break;
   15870 		delay(2*1000);
   15871 		i++;
   15872 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15873 
   15874 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15875 		wm_put_hw_semaphore_82573(sc);
   15876 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15877 		    device_xname(sc->sc_dev));
   15878 		return -1;
   15879 	}
   15880 
   15881 	return 0;
   15882 }
   15883 
   15884 static void
   15885 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15886 {
   15887 	uint32_t reg;
   15888 
   15889 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15890 		device_xname(sc->sc_dev), __func__));
   15891 
   15892 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15893 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15894 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15895 }
   15896 
   15897 /*
   15898  * Management mode and power management related subroutines.
   15899  * BMC, AMT, suspend/resume and EEE.
   15900  */
   15901 
   15902 #ifdef WM_WOL
   15903 static int
   15904 wm_check_mng_mode(struct wm_softc *sc)
   15905 {
   15906 	int rv;
   15907 
   15908 	switch (sc->sc_type) {
   15909 	case WM_T_ICH8:
   15910 	case WM_T_ICH9:
   15911 	case WM_T_ICH10:
   15912 	case WM_T_PCH:
   15913 	case WM_T_PCH2:
   15914 	case WM_T_PCH_LPT:
   15915 	case WM_T_PCH_SPT:
   15916 	case WM_T_PCH_CNP:
   15917 	case WM_T_PCH_TGP:
   15918 		rv = wm_check_mng_mode_ich8lan(sc);
   15919 		break;
   15920 	case WM_T_82574:
   15921 	case WM_T_82583:
   15922 		rv = wm_check_mng_mode_82574(sc);
   15923 		break;
   15924 	case WM_T_82571:
   15925 	case WM_T_82572:
   15926 	case WM_T_82573:
   15927 	case WM_T_80003:
   15928 		rv = wm_check_mng_mode_generic(sc);
   15929 		break;
   15930 	default:
    15931 		/* Nothing to do */
   15932 		rv = 0;
   15933 		break;
   15934 	}
   15935 
   15936 	return rv;
   15937 }
   15938 
   15939 static int
   15940 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15941 {
   15942 	uint32_t fwsm;
   15943 
   15944 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15945 
   15946 	if (((fwsm & FWSM_FW_VALID) != 0)
   15947 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15948 		return 1;
   15949 
   15950 	return 0;
   15951 }
   15952 
   15953 static int
   15954 wm_check_mng_mode_82574(struct wm_softc *sc)
   15955 {
   15956 	uint16_t data;
   15957 
   15958 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15959 
   15960 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15961 		return 1;
   15962 
   15963 	return 0;
   15964 }
   15965 
   15966 static int
   15967 wm_check_mng_mode_generic(struct wm_softc *sc)
   15968 {
   15969 	uint32_t fwsm;
   15970 
   15971 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15972 
   15973 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15974 		return 1;
   15975 
   15976 	return 0;
   15977 }
   15978 #endif /* WM_WOL */
   15979 
   15980 static int
   15981 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15982 {
   15983 	uint32_t manc, fwsm, factps;
   15984 
   15985 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15986 		return 0;
   15987 
   15988 	manc = CSR_READ(sc, WMREG_MANC);
   15989 
   15990 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15991 		device_xname(sc->sc_dev), manc));
   15992 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15993 		return 0;
   15994 
   15995 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15996 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15997 		factps = CSR_READ(sc, WMREG_FACTPS);
   15998 		if (((factps & FACTPS_MNGCG) == 0)
   15999 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   16000 			return 1;
   16001 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   16002 		uint16_t data;
   16003 
   16004 		factps = CSR_READ(sc, WMREG_FACTPS);
   16005 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   16006 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   16007 			device_xname(sc->sc_dev), factps, data));
   16008 		if (((factps & FACTPS_MNGCG) == 0)
   16009 		    && ((data & NVM_CFG2_MNGM_MASK)
   16010 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   16011 			return 1;
   16012 	} else if (((manc & MANC_SMBUS_EN) != 0)
   16013 	    && ((manc & MANC_ASF_EN) == 0))
   16014 		return 1;
   16015 
   16016 	return 0;
   16017 }
   16018 
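          /*
           * Check whether the platform firmware currently blocks PHY resets.
           * On ICH/PCH parts the FWSM_RSPCIPHY bit must be set before a reset
           * is allowed; it is re-read for up to roughly 300ms (30 * 10ms).
           */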
   16019 static bool
   16020 wm_phy_resetisblocked(struct wm_softc *sc)
   16021 {
   16022 	bool blocked = false;
   16023 	uint32_t reg;
   16024 	int i = 0;
   16025 
   16026 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16027 		device_xname(sc->sc_dev), __func__));
   16028 
   16029 	switch (sc->sc_type) {
   16030 	case WM_T_ICH8:
   16031 	case WM_T_ICH9:
   16032 	case WM_T_ICH10:
   16033 	case WM_T_PCH:
   16034 	case WM_T_PCH2:
   16035 	case WM_T_PCH_LPT:
   16036 	case WM_T_PCH_SPT:
   16037 	case WM_T_PCH_CNP:
   16038 	case WM_T_PCH_TGP:
   16039 		do {
   16040 			reg = CSR_READ(sc, WMREG_FWSM);
   16041 			if ((reg & FWSM_RSPCIPHY) == 0) {
   16042 				blocked = true;
   16043 				delay(10*1000);
   16044 				continue;
   16045 			}
   16046 			blocked = false;
   16047 		} while (blocked && (i++ < 30));
   16048 		return blocked;
   16049 		break;
   16050 	case WM_T_82571:
   16051 	case WM_T_82572:
   16052 	case WM_T_82573:
   16053 	case WM_T_82574:
   16054 	case WM_T_82583:
   16055 	case WM_T_80003:
   16056 		reg = CSR_READ(sc, WMREG_MANC);
   16057 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   16058 			return true;
   16059 		else
   16060 			return false;
   16061 		break;
   16062 	default:
   16063 		/* No problem */
   16064 		break;
   16065 	}
   16066 
   16067 	return false;
   16068 }
   16069 
   16070 static void
   16071 wm_get_hw_control(struct wm_softc *sc)
   16072 {
   16073 	uint32_t reg;
   16074 
   16075 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16076 		device_xname(sc->sc_dev), __func__));
   16077 
   16078 	if (sc->sc_type == WM_T_82573) {
   16079 		reg = CSR_READ(sc, WMREG_SWSM);
   16080 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   16081 	} else if (sc->sc_type >= WM_T_82571) {
   16082 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   16084 	}
   16085 }
   16086 
   16087 static void
   16088 wm_release_hw_control(struct wm_softc *sc)
   16089 {
   16090 	uint32_t reg;
   16091 
   16092 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   16093 		device_xname(sc->sc_dev), __func__));
   16094 
   16095 	if (sc->sc_type == WM_T_82573) {
   16096 		reg = CSR_READ(sc, WMREG_SWSM);
   16097 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   16098 	} else if (sc->sc_type >= WM_T_82571) {
   16099 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16100 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   16101 	}
   16102 }
   16103 
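          /*
           * Gate or ungate the automatic PHY configuration run by the
           * hardware (PCH2/82579 and newer).  While gated, the hardware does
           * not start its own PHY configuration cycle, which keeps it from
           * racing with the workarounds performed by the driver.
           */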
   16104 static void
   16105 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   16106 {
   16107 	uint32_t reg;
   16108 
   16109 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16110 		device_xname(sc->sc_dev), __func__));
   16111 
   16112 	if (sc->sc_type < WM_T_PCH2)
   16113 		return;
   16114 
   16115 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   16116 
   16117 	if (gate)
   16118 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   16119 	else
   16120 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   16121 
   16122 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   16123 }
   16124 
   16125 static int
   16126 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   16127 {
   16128 	uint32_t fwsm, reg;
   16129 	int rv;
   16130 
   16131 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16132 		device_xname(sc->sc_dev), __func__));
   16133 
   16134 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   16135 	wm_gate_hw_phy_config_ich8lan(sc, true);
   16136 
   16137 	/* Disable ULP */
   16138 	wm_ulp_disable(sc);
   16139 
   16140 	/* Acquire PHY semaphore */
   16141 	rv = sc->phy.acquire(sc);
   16142 	if (rv != 0) {
   16143 		DPRINTF(sc, WM_DEBUG_INIT,
   16144 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16145 		return rv;
   16146 	}
   16147 
   16148 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   16149 	 * inaccessible and resetting the PHY is not blocked, toggle the
   16150 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   16151 	 */
   16152 	fwsm = CSR_READ(sc, WMREG_FWSM);
   16153 	switch (sc->sc_type) {
   16154 	case WM_T_PCH_LPT:
   16155 	case WM_T_PCH_SPT:
   16156 	case WM_T_PCH_CNP:
   16157 	case WM_T_PCH_TGP:
   16158 		if (wm_phy_is_accessible_pchlan(sc))
   16159 			break;
   16160 
   16161 		/* Before toggling LANPHYPC, see if PHY is accessible by
   16162 		 * forcing MAC to SMBus mode first.
   16163 		 */
   16164 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16165 		reg |= CTRL_EXT_FORCE_SMBUS;
   16166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16167 #if 0
   16168 		/* XXX Isn't this required??? */
   16169 		CSR_WRITE_FLUSH(sc);
   16170 #endif
   16171 		/* Wait 50 milliseconds for MAC to finish any retries
   16172 		 * that it might be trying to perform from previous
   16173 		 * attempts to acknowledge any phy read requests.
   16174 		 */
   16175 		delay(50 * 1000);
   16176 		/* FALLTHROUGH */
   16177 	case WM_T_PCH2:
   16178 		if (wm_phy_is_accessible_pchlan(sc) == true)
   16179 			break;
   16180 		/* FALLTHROUGH */
   16181 	case WM_T_PCH:
   16182 		if (sc->sc_type == WM_T_PCH)
   16183 			if ((fwsm & FWSM_FW_VALID) != 0)
   16184 				break;
   16185 
   16186 		if (wm_phy_resetisblocked(sc) == true) {
   16187 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   16188 			break;
   16189 		}
   16190 
   16191 		/* Toggle LANPHYPC Value bit */
   16192 		wm_toggle_lanphypc_pch_lpt(sc);
   16193 
   16194 		if (sc->sc_type >= WM_T_PCH_LPT) {
   16195 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16196 				break;
   16197 
   16198 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   16199 			 * so ensure that the MAC is also out of SMBus mode
   16200 			 */
   16201 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16202 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16203 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16204 
   16205 			if (wm_phy_is_accessible_pchlan(sc) == true)
   16206 				break;
   16207 			rv = -1;
   16208 		}
   16209 		break;
   16210 	default:
   16211 		break;
   16212 	}
   16213 
   16214 	/* Release semaphore */
   16215 	sc->phy.release(sc);
   16216 
   16217 	if (rv == 0) {
   16218 		/* Check to see if able to reset PHY.  Print error if not */
   16219 		if (wm_phy_resetisblocked(sc)) {
   16220 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   16221 			goto out;
   16222 		}
   16223 
    16224 		/* Reset the PHY before any access to it.  Doing so ensures
   16225 		 * that the PHY is in a known good state before we read/write
   16226 		 * PHY registers.  The generic reset is sufficient here,
   16227 		 * because we haven't determined the PHY type yet.
   16228 		 */
   16229 		if (wm_reset_phy(sc) != 0)
   16230 			goto out;
   16231 
   16232 		/* On a successful reset, possibly need to wait for the PHY
   16233 		 * to quiesce to an accessible state before returning control
   16234 		 * to the calling function.  If the PHY does not quiesce, then
   16235 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    16236 		 * the PHY is in.
   16237 		 */
   16238 		if (wm_phy_resetisblocked(sc))
   16239 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   16240 	}
   16241 
   16242 out:
   16243 	/* Ungate automatic PHY configuration on non-managed 82579 */
   16244 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   16245 		delay(10*1000);
   16246 		wm_gate_hw_phy_config_ich8lan(sc, false);
   16247 	}
   16248 
    16249 	return rv;
   16250 }
   16251 
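          /*
           * While the driver owns the interface, keep management traffic
           * flowing to the host: ARP interception by the BMC is disabled and
           * packets matching the MANC2H_PORT_623/624 filters are routed up.
           */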
   16252 static void
   16253 wm_init_manageability(struct wm_softc *sc)
   16254 {
   16255 
   16256 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16257 		device_xname(sc->sc_dev), __func__));
   16258 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   16259 
   16260 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16261 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   16262 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16263 
   16264 		/* Disable hardware interception of ARP */
   16265 		manc &= ~MANC_ARP_EN;
   16266 
   16267 		/* Enable receiving management packets to the host */
   16268 		if (sc->sc_type >= WM_T_82571) {
   16269 			manc |= MANC_EN_MNG2HOST;
   16270 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   16271 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   16272 		}
   16273 
   16274 		CSR_WRITE(sc, WMREG_MANC, manc);
   16275 	}
   16276 }
   16277 
   16278 static void
   16279 wm_release_manageability(struct wm_softc *sc)
   16280 {
   16281 
   16282 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   16283 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   16284 
   16285 		manc |= MANC_ARP_EN;
   16286 		if (sc->sc_type >= WM_T_82571)
   16287 			manc &= ~MANC_EN_MNG2HOST;
   16288 
   16289 		CSR_WRITE(sc, WMREG_MANC, manc);
   16290 	}
   16291 }
   16292 
   16293 static void
   16294 wm_get_wakeup(struct wm_softc *sc)
   16295 {
   16296 
   16297 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   16298 	switch (sc->sc_type) {
   16299 	case WM_T_82573:
   16300 	case WM_T_82583:
   16301 		sc->sc_flags |= WM_F_HAS_AMT;
   16302 		/* FALLTHROUGH */
   16303 	case WM_T_80003:
   16304 	case WM_T_82575:
   16305 	case WM_T_82576:
   16306 	case WM_T_82580:
   16307 	case WM_T_I350:
   16308 	case WM_T_I354:
   16309 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   16310 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   16311 		/* FALLTHROUGH */
   16312 	case WM_T_82541:
   16313 	case WM_T_82541_2:
   16314 	case WM_T_82547:
   16315 	case WM_T_82547_2:
   16316 	case WM_T_82571:
   16317 	case WM_T_82572:
   16318 	case WM_T_82574:
   16319 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16320 		break;
   16321 	case WM_T_ICH8:
   16322 	case WM_T_ICH9:
   16323 	case WM_T_ICH10:
   16324 	case WM_T_PCH:
   16325 	case WM_T_PCH2:
   16326 	case WM_T_PCH_LPT:
   16327 	case WM_T_PCH_SPT:
   16328 	case WM_T_PCH_CNP:
   16329 	case WM_T_PCH_TGP:
   16330 		sc->sc_flags |= WM_F_HAS_AMT;
   16331 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   16332 		break;
   16333 	default:
   16334 		break;
   16335 	}
   16336 
   16337 	/* 1: HAS_MANAGE */
   16338 	if (wm_enable_mng_pass_thru(sc) != 0)
   16339 		sc->sc_flags |= WM_F_HAS_MANAGE;
   16340 
   16341 	/*
    16342 	 * Note that the WOL flag is set later, after the EEPROM/NVM
    16343 	 * initialization has been done.
   16344 	 */
   16345 }
   16346 
   16347 /*
   16348  * Unconfigure Ultra Low Power mode.
   16349  * Only for I217 and newer (see below).
   16350  */
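          /*
           * Two exit paths: when valid management firmware is present, ask
           * the ME to undo ULP through the H2ME register and poll
           * FWSM_ULP_CFG_DONE for up to about 300ms; otherwise toggle
           * LANPHYPC and clear the SMBus/ULP configuration in the PHY and
           * MAC by hand.
           */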
   16351 static int
   16352 wm_ulp_disable(struct wm_softc *sc)
   16353 {
   16354 	uint32_t reg;
   16355 	uint16_t phyreg;
   16356 	int i = 0, rv;
   16357 
   16358 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16359 		device_xname(sc->sc_dev), __func__));
   16360 	/* Exclude old devices */
   16361 	if ((sc->sc_type < WM_T_PCH_LPT)
   16362 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   16363 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   16364 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   16365 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   16366 		return 0;
   16367 
   16368 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   16369 		/* Request ME un-configure ULP mode in the PHY */
   16370 		reg = CSR_READ(sc, WMREG_H2ME);
   16371 		reg &= ~H2ME_ULP;
   16372 		reg |= H2ME_ENFORCE_SETTINGS;
   16373 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16374 
   16375 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   16376 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16377 			if (i++ == 30) {
   16378 				device_printf(sc->sc_dev, "%s timed out\n",
   16379 				    __func__);
   16380 				return -1;
   16381 			}
   16382 			delay(10 * 1000);
   16383 		}
   16384 		reg = CSR_READ(sc, WMREG_H2ME);
   16385 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16386 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16387 
   16388 		return 0;
   16389 	}
   16390 
   16391 	/* Acquire semaphore */
   16392 	rv = sc->phy.acquire(sc);
   16393 	if (rv != 0) {
   16394 		DPRINTF(sc, WM_DEBUG_INIT,
   16395 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16396 		return rv;
   16397 	}
   16398 
   16399 	/* Toggle LANPHYPC */
   16400 	wm_toggle_lanphypc_pch_lpt(sc);
   16401 
   16402 	/* Unforce SMBus mode in PHY */
   16403 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16404 	if (rv != 0) {
   16405 		uint32_t reg2;
   16406 
   16407 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16408 		    __func__);
   16409 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16410 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16411 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16412 		delay(50 * 1000);
   16413 
   16414 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16415 		    &phyreg);
   16416 		if (rv != 0)
   16417 			goto release;
   16418 	}
   16419 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16420 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16421 
   16422 	/* Unforce SMBus mode in MAC */
   16423 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16424 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16425 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16426 
   16427 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16428 	if (rv != 0)
   16429 		goto release;
   16430 	phyreg |= HV_PM_CTRL_K1_ENA;
   16431 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16432 
   16433 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16434 	    &phyreg);
   16435 	if (rv != 0)
   16436 		goto release;
   16437 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16438 	    | I218_ULP_CONFIG1_STICKY_ULP
   16439 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16440 	    | I218_ULP_CONFIG1_WOL_HOST
   16441 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16442 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16443 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16444 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16445 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16446 	phyreg |= I218_ULP_CONFIG1_START;
   16447 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16448 
   16449 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16450 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16451 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16452 
   16453 release:
   16454 	/* Release semaphore */
   16455 	sc->phy.release(sc);
   16456 	wm_gmii_reset(sc);
   16457 	delay(50 * 1000);
   16458 
   16459 	return rv;
   16460 }
   16461 
   16462 /* WOL in the newer chipset interfaces (pchlan) */
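          /*
           * The MAC's receive address registers, multicast table and Rx
           * control settings are mirrored into the PHY, which stays powered
           * in low-power states and can assert PME on a matching packet by
           * itself.
           */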
   16463 static int
   16464 wm_enable_phy_wakeup(struct wm_softc *sc)
   16465 {
   16466 	device_t dev = sc->sc_dev;
   16467 	uint32_t mreg, moff;
   16468 	uint16_t wuce, wuc, wufc, preg;
   16469 	int i, rv;
   16470 
   16471 	KASSERT(sc->sc_type >= WM_T_PCH);
   16472 
   16473 	/* Copy MAC RARs to PHY RARs */
   16474 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16475 
   16476 	/* Activate PHY wakeup */
   16477 	rv = sc->phy.acquire(sc);
   16478 	if (rv != 0) {
   16479 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16480 		    __func__);
   16481 		return rv;
   16482 	}
   16483 
   16484 	/*
   16485 	 * Enable access to PHY wakeup registers.
   16486 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16487 	 */
   16488 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16489 	if (rv != 0) {
   16490 		device_printf(dev,
   16491 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16492 		goto release;
   16493 	}
   16494 
   16495 	/* Copy MAC MTA to PHY MTA */
   16496 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16497 		uint16_t lo, hi;
   16498 
   16499 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16500 		lo = (uint16_t)(mreg & 0xffff);
   16501 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16502 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16503 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16504 	}
   16505 
   16506 	/* Configure PHY Rx Control register */
   16507 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16508 	mreg = CSR_READ(sc, WMREG_RCTL);
   16509 	if (mreg & RCTL_UPE)
   16510 		preg |= BM_RCTL_UPE;
   16511 	if (mreg & RCTL_MPE)
   16512 		preg |= BM_RCTL_MPE;
   16513 	preg &= ~(BM_RCTL_MO_MASK);
   16514 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16515 	if (moff != 0)
   16516 		preg |= moff << BM_RCTL_MO_SHIFT;
   16517 	if (mreg & RCTL_BAM)
   16518 		preg |= BM_RCTL_BAM;
   16519 	if (mreg & RCTL_PMCF)
   16520 		preg |= BM_RCTL_PMCF;
   16521 	mreg = CSR_READ(sc, WMREG_CTRL);
   16522 	if (mreg & CTRL_RFCE)
   16523 		preg |= BM_RCTL_RFCE;
   16524 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16525 
   16526 	wuc = WUC_APME | WUC_PME_EN;
   16527 	wufc = WUFC_MAG;
   16528 	/* Enable PHY wakeup in MAC register */
   16529 	CSR_WRITE(sc, WMREG_WUC,
   16530 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16531 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16532 
   16533 	/* Configure and enable PHY wakeup in PHY registers */
   16534 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16535 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16536 
   16537 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16538 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16539 
   16540 release:
   16541 	sc->phy.release(sc);
   16542 
    16543 	return rv;
   16544 }
   16545 
   16546 /* Power down workaround on D3 */
   16547 static void
   16548 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16549 {
   16550 	uint32_t reg;
   16551 	uint16_t phyreg;
   16552 	int i;
   16553 
   16554 	for (i = 0; i < 2; i++) {
   16555 		/* Disable link */
   16556 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16557 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16558 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16559 
   16560 		/*
   16561 		 * Call gig speed drop workaround on Gig disable before
   16562 		 * accessing any PHY registers
   16563 		 */
   16564 		if (sc->sc_type == WM_T_ICH8)
   16565 			wm_gig_downshift_workaround_ich8lan(sc);
   16566 
   16567 		/* Write VR power-down enable */
   16568 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16569 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16570 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16571 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16572 
   16573 		/* Read it back and test */
   16574 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16575 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16576 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16577 			break;
   16578 
   16579 		/* Issue PHY reset and repeat at most one more time */
   16580 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16581 	}
   16582 }
   16583 
   16584 /*
   16585  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16586  *  @sc: pointer to the HW structure
   16587  *
   16588  *  During S0 to Sx transition, it is possible the link remains at gig
   16589  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16590  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16591  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16592  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16593  *  needs to be written.
    16594  *  Parts that support (and are linked to a partner which supports) EEE in
   16595  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16596  *  than 10Mbps w/o EEE.
   16597  */
   16598 static void
   16599 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16600 {
   16601 	device_t dev = sc->sc_dev;
   16602 	struct ethercom *ec = &sc->sc_ethercom;
   16603 	uint32_t phy_ctrl;
   16604 	int rv;
   16605 
   16606 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16607 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16608 
   16609 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
   16610 
   16611 	if (sc->sc_phytype == WMPHY_I217) {
   16612 		uint16_t devid = sc->sc_pcidevid;
   16613 
   16614 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16615 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16616 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16617 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16618 		    (sc->sc_type >= WM_T_PCH_SPT))
   16619 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16620 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16621 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16622 
   16623 		if (sc->phy.acquire(sc) != 0)
   16624 			goto out;
   16625 
   16626 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16627 			uint16_t eee_advert;
   16628 
   16629 			rv = wm_read_emi_reg_locked(dev,
   16630 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16631 			if (rv)
   16632 				goto release;
   16633 
   16634 			/*
   16635 			 * Disable LPLU if both link partners support 100BaseT
   16636 			 * EEE and 100Full is advertised on both ends of the
   16637 			 * link, and enable Auto Enable LPI since there will
   16638 			 * be no driver to enable LPI while in Sx.
   16639 			 */
   16640 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16641 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16642 				uint16_t anar, phy_reg;
   16643 
   16644 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16645 				    &anar);
   16646 				if (anar & ANAR_TX_FD) {
   16647 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16648 					    PHY_CTRL_NOND0A_LPLU);
   16649 
   16650 					/* Set Auto Enable LPI after link up */
   16651 					sc->phy.readreg_locked(dev, 2,
   16652 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16653 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16654 					sc->phy.writereg_locked(dev, 2,
   16655 					    I217_LPI_GPIO_CTRL, phy_reg);
   16656 				}
   16657 			}
   16658 		}
   16659 
   16660 		/*
   16661 		 * For i217 Intel Rapid Start Technology support,
   16662 		 * when the system is going into Sx and no manageability engine
   16663 		 * is present, the driver must configure proxy to reset only on
   16664 		 * power good.	LPI (Low Power Idle) state must also reset only
   16665 		 * on power good, as well as the MTA (Multicast table array).
   16666 		 * The SMBus release must also be disabled on LCD reset.
   16667 		 */
   16668 
   16669 		/*
   16670 		 * Enable MTA to reset for Intel Rapid Start Technology
   16671 		 * Support
   16672 		 */
   16673 
   16674 release:
   16675 		sc->phy.release(sc);
   16676 	}
   16677 out:
   16678 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16679 
   16680 	if (sc->sc_type == WM_T_ICH8)
   16681 		wm_gig_downshift_workaround_ich8lan(sc);
   16682 
   16683 	if (sc->sc_type >= WM_T_PCH) {
   16684 		wm_oem_bits_config_ich8lan(sc, false);
   16685 
   16686 		/* Reset PHY to activate OEM bits on 82577/8 */
   16687 		if (sc->sc_type == WM_T_PCH)
   16688 			wm_reset_phy(sc);
   16689 
   16690 		if (sc->phy.acquire(sc) != 0)
   16691 			return;
   16692 		wm_write_smbus_addr(sc);
   16693 		sc->phy.release(sc);
   16694 	}
   16695 }
   16696 
   16697 /*
   16698  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16699  *  @sc: pointer to the HW structure
   16700  *
   16701  *  During Sx to S0 transitions on non-managed devices or managed devices
   16702  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16703  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16704  *  the PHY.
   16705  *  On i217, setup Intel Rapid Start Technology.
   16706  */
   16707 static int
   16708 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16709 {
   16710 	device_t dev = sc->sc_dev;
   16711 	int rv;
   16712 
   16713 	if (sc->sc_type < WM_T_PCH2)
   16714 		return 0;
   16715 
   16716 	rv = wm_init_phy_workarounds_pchlan(sc);
   16717 	if (rv != 0)
   16718 		return rv;
   16719 
    16720 	/* For i217 Intel Rapid Start Technology support, when the system
    16721 	 * is transitioning from Sx and no manageability engine is present,
   16722 	 * configure SMBus to restore on reset, disable proxy, and enable
   16723 	 * the reset on MTA (Multicast table array).
   16724 	 */
   16725 	if (sc->sc_phytype == WMPHY_I217) {
   16726 		uint16_t phy_reg;
   16727 
   16728 		rv = sc->phy.acquire(sc);
   16729 		if (rv != 0)
   16730 			return rv;
   16731 
   16732 		/* Clear Auto Enable LPI after link up */
   16733 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16734 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16735 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16736 
   16737 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16738 			/* Restore clear on SMB if no manageability engine
   16739 			 * is present
   16740 			 */
   16741 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16742 			    &phy_reg);
   16743 			if (rv != 0)
   16744 				goto release;
   16745 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16746 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16747 
   16748 			/* Disable Proxy */
   16749 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16750 		}
   16751 		/* Enable reset on MTA */
    16752 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16753 		if (rv != 0)
   16754 			goto release;
   16755 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16756 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16757 
   16758 release:
   16759 		sc->phy.release(sc);
   16760 		return rv;
   16761 	}
   16762 
   16763 	return 0;
   16764 }
   16765 
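          /*
           * Arm the device for wake-on-LAN: run the suspend workarounds,
           * program either PHY-based (PCH and newer) or MAC-based wake
           * filters, and finally set PME_EN in the PCI power management
           * control register.
           */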
   16766 static void
   16767 wm_enable_wakeup(struct wm_softc *sc)
   16768 {
   16769 	uint32_t reg, pmreg;
   16770 	pcireg_t pmode;
   16771 	int rv = 0;
   16772 
   16773 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16774 		device_xname(sc->sc_dev), __func__));
   16775 
   16776 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16777 	    &pmreg, NULL) == 0)
   16778 		return;
   16779 
   16780 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16781 		goto pme;
   16782 
   16783 	/* Advertise the wakeup capability */
   16784 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16785 	    | CTRL_SWDPIN(3));
   16786 
   16787 	/* Keep the laser running on fiber adapters */
   16788 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16789 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16790 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16791 		reg |= CTRL_EXT_SWDPIN(3);
   16792 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16793 	}
   16794 
   16795 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16796 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16797 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16798 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
   16799 	    (sc->sc_type == WM_T_PCH_TGP))
   16800 		wm_suspend_workarounds_ich8lan(sc);
   16801 
   16802 #if 0	/* For the multicast packet */
   16803 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16804 	reg |= WUFC_MC;
   16805 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16806 #endif
   16807 
   16808 	if (sc->sc_type >= WM_T_PCH) {
   16809 		rv = wm_enable_phy_wakeup(sc);
   16810 		if (rv != 0)
   16811 			goto pme;
   16812 	} else {
   16813 		/* Enable wakeup by the MAC */
   16814 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16815 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16816 	}
   16817 
   16818 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16819 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16820 		|| (sc->sc_type == WM_T_PCH2))
   16821 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16822 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16823 
   16824 pme:
   16825 	/* Request PME */
   16826 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16827 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16828 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16829 		/* For WOL */
   16830 		pmode |= PCI_PMCSR_PME_EN;
   16831 	} else {
   16832 		/* Disable WOL */
   16833 		pmode &= ~PCI_PMCSR_PME_EN;
   16834 	}
   16835 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16836 }
   16837 
   16838 /* Disable ASPM L0s and/or L1 for workaround */
   16839 static void
   16840 wm_disable_aspm(struct wm_softc *sc)
   16841 {
   16842 	pcireg_t reg, mask = 0;
    16843 	const char *str = "";
   16844 
   16845 	/*
    16846 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16847 	 * config space.
   16848 	 */
   16849 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16850 		return;
   16851 
   16852 	switch (sc->sc_type) {
   16853 	case WM_T_82571:
   16854 	case WM_T_82572:
   16855 		/*
   16856 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16857 		 * State Power management L1 State (ASPM L1).
   16858 		 */
   16859 		mask = PCIE_LCSR_ASPM_L1;
   16860 		str = "L1 is";
   16861 		break;
   16862 	case WM_T_82573:
   16863 	case WM_T_82574:
   16864 	case WM_T_82583:
   16865 		/*
   16866 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16867 		 *
    16868 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16869 		 * some chipsets.  The 82574 and 82583 documents say that
    16870 		 * disabling L0s on those specific chipsets is sufficient,
    16871 		 * but we follow what the Intel em driver does.
   16872 		 *
   16873 		 * References:
   16874 		 * Errata 8 of the Specification Update of i82573.
   16875 		 * Errata 20 of the Specification Update of i82574.
   16876 		 * Errata 9 of the Specification Update of i82583.
   16877 		 */
   16878 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16879 		str = "L0s and L1 are";
   16880 		break;
   16881 	default:
   16882 		return;
   16883 	}
   16884 
   16885 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16886 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16887 	reg &= ~mask;
   16888 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16889 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16890 
   16891 	/* Print only in wm_attach() */
   16892 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16893 		aprint_verbose_dev(sc->sc_dev,
   16894 		    "ASPM %s disabled to workaround the errata.\n", str);
   16895 }
   16896 
   16897 /* LPLU */
   16898 
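          /*
           * LPLU (Low Power Link Up) trades link speed for power.  Keeping it
           * disabled in D0 preserves full-speed negotiation while the device
           * is active; the controlling register differs per generation, hence
           * the switch below.
           */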
   16899 static void
   16900 wm_lplu_d0_disable(struct wm_softc *sc)
   16901 {
   16902 	struct mii_data *mii = &sc->sc_mii;
   16903 	uint32_t reg;
   16904 	uint16_t phyval;
   16905 
   16906 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16907 		device_xname(sc->sc_dev), __func__));
   16908 
   16909 	if (sc->sc_phytype == WMPHY_IFE)
   16910 		return;
   16911 
   16912 	switch (sc->sc_type) {
   16913 	case WM_T_82571:
   16914 	case WM_T_82572:
   16915 	case WM_T_82573:
   16916 	case WM_T_82575:
   16917 	case WM_T_82576:
   16918 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16919 		phyval &= ~PMR_D0_LPLU;
   16920 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16921 		break;
   16922 	case WM_T_82580:
   16923 	case WM_T_I350:
   16924 	case WM_T_I210:
   16925 	case WM_T_I211:
   16926 		reg = CSR_READ(sc, WMREG_PHPM);
   16927 		reg &= ~PHPM_D0A_LPLU;
   16928 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16929 		break;
   16930 	case WM_T_82574:
   16931 	case WM_T_82583:
   16932 	case WM_T_ICH8:
   16933 	case WM_T_ICH9:
   16934 	case WM_T_ICH10:
   16935 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16936 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16937 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16938 		CSR_WRITE_FLUSH(sc);
   16939 		break;
   16940 	case WM_T_PCH:
   16941 	case WM_T_PCH2:
   16942 	case WM_T_PCH_LPT:
   16943 	case WM_T_PCH_SPT:
   16944 	case WM_T_PCH_CNP:
   16945 	case WM_T_PCH_TGP:
   16946 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16947 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16948 		if (wm_phy_resetisblocked(sc) == false)
   16949 			phyval |= HV_OEM_BITS_ANEGNOW;
   16950 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16951 		break;
   16952 	default:
   16953 		break;
   16954 	}
   16955 }
   16956 
   16957 /* EEE */
   16958 
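          /*
           * Energy Efficient Ethernet (IEEE 802.3az).  On I350-class MACs EEE
           * is driven through the IPCNFG/EEER registers; on PCH2 and newer
           * parts it is negotiated through EMI-mapped PHY registers instead.
           */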
   16959 static int
   16960 wm_set_eee_i350(struct wm_softc *sc)
   16961 {
   16962 	struct ethercom *ec = &sc->sc_ethercom;
   16963 	uint32_t ipcnfg, eeer;
   16964 	uint32_t ipcnfg_mask
   16965 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16966 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16967 
   16968 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16969 
   16970 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16971 	eeer = CSR_READ(sc, WMREG_EEER);
   16972 
   16973 	/* Enable or disable per user setting */
   16974 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16975 		ipcnfg |= ipcnfg_mask;
   16976 		eeer |= eeer_mask;
   16977 	} else {
   16978 		ipcnfg &= ~ipcnfg_mask;
   16979 		eeer &= ~eeer_mask;
   16980 	}
   16981 
   16982 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16983 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16984 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16985 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16986 
   16987 	return 0;
   16988 }
   16989 
   16990 static int
   16991 wm_set_eee_pchlan(struct wm_softc *sc)
   16992 {
   16993 	device_t dev = sc->sc_dev;
   16994 	struct ethercom *ec = &sc->sc_ethercom;
   16995 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16996 	int rv;
   16997 
   16998 	switch (sc->sc_phytype) {
   16999 	case WMPHY_82579:
   17000 		lpa = I82579_EEE_LP_ABILITY;
   17001 		pcs_status = I82579_EEE_PCS_STATUS;
   17002 		adv_addr = I82579_EEE_ADVERTISEMENT;
   17003 		break;
   17004 	case WMPHY_I217:
   17005 		lpa = I217_EEE_LP_ABILITY;
   17006 		pcs_status = I217_EEE_PCS_STATUS;
   17007 		adv_addr = I217_EEE_ADVERTISEMENT;
   17008 		break;
   17009 	default:
   17010 		return 0;
   17011 	}
   17012 
   17013 	rv = sc->phy.acquire(sc);
   17014 	if (rv != 0) {
   17015 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   17016 		return rv;
   17017 	}
   17018 
   17019 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   17020 	if (rv != 0)
   17021 		goto release;
   17022 
   17023 	/* Clear bits that enable EEE in various speeds */
   17024 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   17025 
   17026 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   17027 		/* Save off link partner's EEE ability */
   17028 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   17029 		if (rv != 0)
   17030 			goto release;
   17031 
   17032 		/* Read EEE advertisement */
   17033 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   17034 			goto release;
   17035 
   17036 		/*
   17037 		 * Enable EEE only for speeds in which the link partner is
   17038 		 * EEE capable and for which we advertise EEE.
   17039 		 */
   17040 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   17041 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   17042 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   17043 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   17044 			if ((data & ANLPAR_TX_FD) != 0)
   17045 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   17046 			else {
   17047 				/*
   17048 				 * EEE is not supported in 100Half, so ignore
   17049 				 * partner's EEE in 100 ability if full-duplex
   17050 				 * is not advertised.
   17051 				 */
   17052 				sc->eee_lp_ability
   17053 				    &= ~AN_EEEADVERT_100_TX;
   17054 			}
   17055 		}
   17056 	}
   17057 
   17058 	if (sc->sc_phytype == WMPHY_82579) {
   17059 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   17060 		if (rv != 0)
   17061 			goto release;
   17062 
   17063 		data &= ~I82579_LPI_PLL_SHUT_100;
   17064 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   17065 	}
   17066 
   17067 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   17068 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   17069 		goto release;
   17070 
   17071 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   17072 release:
   17073 	sc->phy.release(sc);
   17074 
   17075 	return rv;
   17076 }
   17077 
   17078 static int
   17079 wm_set_eee(struct wm_softc *sc)
   17080 {
   17081 	struct ethercom *ec = &sc->sc_ethercom;
   17082 
   17083 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   17084 		return 0;
   17085 
   17086 	if (sc->sc_type == WM_T_I354) {
   17087 		/* I354 uses an external PHY */
   17088 		return 0; /* not yet */
   17089 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   17090 		return wm_set_eee_i350(sc);
   17091 	else if (sc->sc_type >= WM_T_PCH2)
   17092 		return wm_set_eee_pchlan(sc);
   17093 
   17094 	return 0;
   17095 }
   17096 
   17097 /*
   17098  * Workarounds (mainly PHY related).
    17099  * Basically, PHY workarounds are in the PHY drivers.
   17100  */
   17101 
   17102 /* Workaround for 82566 Kumeran PCS lock loss */
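          /*
           * The PCS can lose lock at 1Gbps.  The Kumeran diagnostic register
           * is read twice (the first read apparently returns latched state)
           * and the PHY is reset and re-checked up to ten times before
           * gigabit negotiation is disabled.
           */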
   17103 static int
   17104 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   17105 {
   17106 	struct mii_data *mii = &sc->sc_mii;
   17107 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17108 	int i, reg, rv;
   17109 	uint16_t phyreg;
   17110 
   17111 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17112 		device_xname(sc->sc_dev), __func__));
   17113 
   17114 	/* If the link is not up, do nothing */
   17115 	if ((status & STATUS_LU) == 0)
   17116 		return 0;
   17117 
   17118 	/* Nothing to do if the link is other than 1Gbps */
   17119 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   17120 		return 0;
   17121 
   17122 	for (i = 0; i < 10; i++) {
   17123 		/* read twice */
   17124 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17125 		if (rv != 0)
   17126 			return rv;
   17127 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   17128 		if (rv != 0)
   17129 			return rv;
   17130 
   17131 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   17132 			goto out;	/* GOOD! */
   17133 
   17134 		/* Reset the PHY */
   17135 		wm_reset_phy(sc);
   17136 		delay(5*1000);
   17137 	}
   17138 
   17139 	/* Disable GigE link negotiation */
   17140 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   17141 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   17142 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   17143 
   17144 	/*
   17145 	 * Call gig speed drop workaround on Gig disable before accessing
   17146 	 * any PHY registers.
   17147 	 */
   17148 	wm_gig_downshift_workaround_ich8lan(sc);
   17149 
   17150 out:
   17151 	return 0;
   17152 }
   17153 
   17154 /*
   17155  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   17156  *  @sc: pointer to the HW structure
   17157  *
   17158  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   17159  *  LPLU, Gig disable, MDIC PHY reset):
   17160  *    1) Set Kumeran Near-end loopback
   17161  *    2) Clear Kumeran Near-end loopback
   17162  *  Should only be called for ICH8[m] devices with any 1G Phy.
   17163  */
   17164 static void
   17165 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   17166 {
   17167 	uint16_t kmreg;
   17168 
   17169 	/* Only for igp3 */
   17170 	if (sc->sc_phytype == WMPHY_IGP_3) {
   17171 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   17172 			return;
   17173 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   17174 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   17175 			return;
   17176 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   17177 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   17178 	}
   17179 }
   17180 
   17181 /*
   17182  * Workaround for pch's PHYs
   17183  * XXX should be moved to new PHY driver?
   17184  */
   17185 static int
   17186 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17187 {
   17188 	device_t dev = sc->sc_dev;
   17189 	struct mii_data *mii = &sc->sc_mii;
   17190 	struct mii_softc *child;
   17191 	uint16_t phy_data, phyrev = 0;
   17192 	int phytype = sc->sc_phytype;
   17193 	int rv;
   17194 
   17195 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17196 		device_xname(dev), __func__));
   17197 	KASSERT(sc->sc_type == WM_T_PCH);
   17198 
   17199 	/* Set MDIO slow mode before any other MDIO access */
   17200 	if (phytype == WMPHY_82577)
   17201 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   17202 			return rv;
   17203 
   17204 	child = LIST_FIRST(&mii->mii_phys);
   17205 	if (child != NULL)
   17206 		phyrev = child->mii_mpd_rev;
   17207 
    17208 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   17209 	if ((child != NULL) &&
   17210 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   17211 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   17212 		/* Disable generation of early preamble (0x4431) */
   17213 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17214 		    &phy_data);
   17215 		if (rv != 0)
   17216 			return rv;
   17217 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   17218 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   17219 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   17220 		    phy_data);
   17221 		if (rv != 0)
   17222 			return rv;
   17223 
   17224 		/* Preamble tuning for SSC */
   17225 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   17226 		if (rv != 0)
   17227 			return rv;
   17228 	}
   17229 
   17230 	/* 82578 */
   17231 	if (phytype == WMPHY_82578) {
   17232 		/*
   17233 		 * Return registers to default by doing a soft reset then
   17234 		 * writing 0x3140 to the control register
   17235 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   17236 		 */
   17237 		if ((child != NULL) && (phyrev < 2)) {
   17238 			PHY_RESET(child);
   17239 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   17240 			if (rv != 0)
   17241 				return rv;
   17242 		}
   17243 	}
   17244 
   17245 	/* Select page 0 */
   17246 	if ((rv = sc->phy.acquire(sc)) != 0)
   17247 		return rv;
   17248 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   17249 	sc->phy.release(sc);
   17250 	if (rv != 0)
   17251 		return rv;
   17252 
   17253 	/*
   17254 	 * Configure the K1 Si workaround during phy reset assuming there is
   17255 	 * link so that it disables K1 if link is in 1Gbps.
   17256 	 */
   17257 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   17258 		return rv;
   17259 
   17260 	/* Workaround for link disconnects on a busy hub in half duplex */
   17261 	rv = sc->phy.acquire(sc);
   17262 	if (rv)
   17263 		return rv;
   17264 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   17265 	if (rv)
   17266 		goto release;
   17267 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   17268 	    phy_data & 0x00ff);
   17269 	if (rv)
   17270 		goto release;
   17271 
   17272 	/* Set MSE higher to enable link to stay up when noise is high */
   17273 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   17274 release:
   17275 	sc->phy.release(sc);
   17276 
   17277 	return rv;
   17278 }
   17279 
   17280 /*
   17281  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   17282  *  @sc:   pointer to the HW structure
   17283  */
   17284 static void
   17285 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   17286 {
   17287 
   17288 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17289 		device_xname(sc->sc_dev), __func__));
   17290 
   17291 	if (sc->phy.acquire(sc) != 0)
   17292 		return;
   17293 
   17294 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17295 
   17296 	sc->phy.release(sc);
   17297 }
   17298 
   17299 static void
   17300 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   17301 {
   17302 	device_t dev = sc->sc_dev;
   17303 	uint32_t mac_reg;
   17304 	uint16_t i, wuce;
   17305 	int count;
   17306 
   17307 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17308 		device_xname(dev), __func__));
   17309 
   17310 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   17311 		return;
   17312 
   17313 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   17314 	count = wm_rar_count(sc);
   17315 	for (i = 0; i < count; i++) {
   17316 		uint16_t lo, hi;
   17317 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17318 		lo = (uint16_t)(mac_reg & 0xffff);
   17319 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   17320 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   17321 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   17322 
   17323 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17324 		lo = (uint16_t)(mac_reg & 0xffff);
   17325 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   17326 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   17327 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   17328 	}
   17329 
   17330 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   17331 }
   17332 
   17333 /*
   17334  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   17335  *  with 82579 PHY
   17336  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   17337  */
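          /*
           * The 82579 needs matched MAC and PHY state for jumbo frames:
           * receive addresses and their CRC seeds are mirrored to the PHY,
           * CRC stripping is forced on, and several Kumeran and PHY FIFO
           * parameters are retuned.  The PHY Rx path is disabled while the
           * update is in flight.
           */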
   17338 static int
   17339 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   17340 {
   17341 	device_t dev = sc->sc_dev;
   17342 	int rar_count;
   17343 	int rv;
   17344 	uint32_t mac_reg;
   17345 	uint16_t dft_ctrl, data;
   17346 	uint16_t i;
   17347 
   17348 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17349 		device_xname(dev), __func__));
   17350 
   17351 	if (sc->sc_type < WM_T_PCH2)
   17352 		return 0;
   17353 
   17354 	/* Acquire PHY semaphore */
   17355 	rv = sc->phy.acquire(sc);
   17356 	if (rv != 0)
   17357 		return rv;
   17358 
   17359 	/* Disable Rx path while enabling/disabling workaround */
   17360 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   17361 	if (rv != 0)
   17362 		goto out;
   17363 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17364 	    dft_ctrl | (1 << 14));
   17365 	if (rv != 0)
   17366 		goto out;
   17367 
   17368 	if (enable) {
   17369 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   17370 		 * SHRAL/H) and initial CRC values to the MAC
   17371 		 */
   17372 		rar_count = wm_rar_count(sc);
   17373 		for (i = 0; i < rar_count; i++) {
   17374 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17375 			uint32_t addr_high, addr_low;
   17376 
   17377 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17378 			if (!(addr_high & RAL_AV))
   17379 				continue;
   17380 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17381 			mac_addr[0] = (addr_low & 0xFF);
   17382 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17383 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17384 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17385 			mac_addr[4] = (addr_high & 0xFF);
   17386 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17387 
   17388 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17389 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17390 		}
   17391 
   17392 		/* Write Rx addresses to the PHY */
   17393 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17394 	}
   17395 
   17396 	/*
   17397 	 * If enable ==
   17398 	 *	true: Enable jumbo frame workaround in the MAC.
   17399 	 *	false: Write MAC register values back to h/w defaults.
   17400 	 */
   17401 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17402 	if (enable) {
   17403 		mac_reg &= ~(1 << 14);
   17404 		mac_reg |= (7 << 15);
   17405 	} else
   17406 		mac_reg &= ~(0xf << 14);
   17407 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17408 
   17409 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17410 	if (enable) {
   17411 		mac_reg |= RCTL_SECRC;
   17412 		sc->sc_rctl |= RCTL_SECRC;
   17413 		sc->sc_flags |= WM_F_CRC_STRIP;
   17414 	} else {
   17415 		mac_reg &= ~RCTL_SECRC;
   17416 		sc->sc_rctl &= ~RCTL_SECRC;
   17417 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17418 	}
   17419 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17420 
   17421 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17422 	if (rv != 0)
   17423 		goto out;
   17424 	if (enable)
   17425 		data |= 1 << 0;
   17426 	else
   17427 		data &= ~(1 << 0);
   17428 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17429 	if (rv != 0)
   17430 		goto out;
   17431 
   17432 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17433 	if (rv != 0)
   17434 		goto out;
   17435 	/*
    17436 	 * XXX FreeBSD and Linux write the same value in both the enable
    17437 	 * and the disable case. Is that correct?
   17438 	 */
   17439 	data &= ~(0xf << 8);
   17440 	data |= (0xb << 8);
   17441 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17442 	if (rv != 0)
   17443 		goto out;
   17444 
   17445 	/*
   17446 	 * If enable ==
   17447 	 *	true: Enable jumbo frame workaround in the PHY.
   17448 	 *	false: Write PHY register values back to h/w defaults.
   17449 	 */
   17450 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17451 	if (rv != 0)
   17452 		goto out;
   17453 	data &= ~(0x7F << 5);
   17454 	if (enable)
   17455 		data |= (0x37 << 5);
   17456 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17457 	if (rv != 0)
   17458 		goto out;
   17459 
   17460 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17461 	if (rv != 0)
   17462 		goto out;
   17463 	if (enable)
   17464 		data &= ~(1 << 13);
   17465 	else
   17466 		data |= (1 << 13);
   17467 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17468 	if (rv != 0)
   17469 		goto out;
   17470 
   17471 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17472 	if (rv != 0)
   17473 		goto out;
   17474 	data &= ~(0x3FF << 2);
   17475 	if (enable)
   17476 		data |= (I82579_TX_PTR_GAP << 2);
   17477 	else
   17478 		data |= (0x8 << 2);
   17479 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17480 	if (rv != 0)
   17481 		goto out;
   17482 
   17483 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17484 	    enable ? 0xf100 : 0x7e00);
   17485 	if (rv != 0)
   17486 		goto out;
   17487 
   17488 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17489 	if (rv != 0)
   17490 		goto out;
   17491 	if (enable)
   17492 		data |= 1 << 10;
   17493 	else
   17494 		data &= ~(1 << 10);
   17495 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17496 	if (rv != 0)
   17497 		goto out;
   17498 
   17499 	/* Re-enable Rx path after enabling/disabling workaround */
   17500 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17501 	    dft_ctrl & ~(1 << 14));
   17502 
   17503 out:
   17504 	sc->phy.release(sc);
   17505 
   17506 	return rv;
   17507 }
   17508 
   17509 /*
    17510  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
   17511  *  done after every PHY reset.
   17512  */
   17513 static int
   17514 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17515 {
   17516 	device_t dev = sc->sc_dev;
   17517 	int rv;
   17518 
   17519 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17520 		device_xname(dev), __func__));
   17521 	KASSERT(sc->sc_type == WM_T_PCH2);
   17522 
   17523 	/* Set MDIO slow mode before any other MDIO access */
   17524 	rv = wm_set_mdio_slow_mode_hv(sc);
   17525 	if (rv != 0)
   17526 		return rv;
   17527 
   17528 	rv = sc->phy.acquire(sc);
   17529 	if (rv != 0)
   17530 		return rv;
   17531 	/* Set MSE higher to enable link to stay up when noise is high */
   17532 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17533 	if (rv != 0)
   17534 		goto release;
    17535 	/* Drop the link after the MSE threshold has been reached 5 times */
   17536 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17537 release:
   17538 	sc->phy.release(sc);
   17539 
   17540 	return rv;
   17541 }
   17542 
   17543 /**
   17544  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17545  *  @link: link up bool flag
   17546  *
    17547  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
    17548  *  indications, preventing further DMA write requests.  Work around this
    17549  *  by disabling de-assertion of the clock request when in 1Gbps mode.
   17550  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17551  *  speeds in order to avoid Tx hangs.
   17552  **/
   17553 static int
   17554 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17555 {
   17556 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17557 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17558 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17559 	uint16_t phyreg;
   17560 
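          	/* At 1Gbps, disable de-assertion of the clock request (see above) */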
   17561 	if (link && (speed == STATUS_SPEED_1000)) {
   17562 		int rv;
   17563 
   17564 		rv = sc->phy.acquire(sc);
   17565 		if (rv != 0)
   17566 			return rv;
   17567 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17568 		    &phyreg);
   17569 		if (rv != 0)
   17570 			goto release;
   17571 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17572 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17573 		if (rv != 0)
   17574 			goto release;
   17575 		delay(20);
   17576 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17577 
    17578 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    17579 		    phyreg);
   17580 release:
   17581 		sc->phy.release(sc);
   17582 		return rv;
   17583 	}
   17584 
   17585 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17586 
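          	/*
          	 * The inband Tx timeouts below are only needed on early PHY
          	 * revisions, and only for 10Mbps and 100Mbps half-duplex links.
          	 */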
   17587 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17588 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17589 	    || !link
   17590 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17591 		goto update_fextnvm6;
   17592 
   17593 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17594 
   17595 	/* Clear link status transmit timeout */
   17596 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17597 	if (speed == STATUS_SPEED_100) {
   17598 		/* Set inband Tx timeout to 5x10us for 100Half */
   17599 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17600 
   17601 		/* Do not extend the K1 entry latency for 100Half */
   17602 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17603 	} else {
   17604 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17605 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17606 
   17607 		/* Extend the K1 entry latency for 10 Mbps */
   17608 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17609 	}
   17610 
   17611 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17612 
   17613 update_fextnvm6:
   17614 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17615 	return 0;
   17616 }
   17617 
   17618 /*
   17619  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17620  *  @sc:   pointer to the HW structure
   17621  *  @link: link up bool flag
   17622  *
   17623  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17624  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17625  *  If link is down, the function will restore the default K1 setting located
   17626  *  in the NVM.
   17627  */
   17628 static int
   17629 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17630 {
   17631 	int k1_enable = sc->sc_nvm_k1_enabled;
   17632 	int rv;
   17633 
   17634 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17635 		device_xname(sc->sc_dev), __func__));
   17636 
   17637 	rv = sc->phy.acquire(sc);
   17638 	if (rv != 0)
   17639 		return rv;
   17640 
   17641 	if (link) {
   17642 		k1_enable = 0;
   17643 
   17644 		/* Link stall fix for link up */
   17645 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17646 		    0x0100);
   17647 	} else {
   17648 		/* Link stall fix for link down */
   17649 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17650 		    0x4100);
   17651 	}
   17652 
   17653 	wm_configure_k1_ich8lan(sc, k1_enable);
   17654 	sc->phy.release(sc);
   17655 
   17656 	return 0;
   17657 }
   17658 
   17659 /*
   17660  *  wm_k1_workaround_lv - K1 Si workaround
   17661  *  @sc:   pointer to the HW structure
   17662  *
    17663  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
    17664  *  Disable K1 for the 1000 and 100 speeds.
   17665  */
   17666 static int
   17667 wm_k1_workaround_lv(struct wm_softc *sc)
   17668 {
   17669 	uint32_t reg;
   17670 	uint16_t phyreg;
   17671 	int rv;
   17672 
   17673 	if (sc->sc_type != WM_T_PCH2)
   17674 		return 0;
   17675 
   17676 	/* Set K1 beacon duration based on 10Mbps speed */
   17677 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17678 	if (rv != 0)
   17679 		return rv;
   17680 
   17681 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17682 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17683 		if (phyreg &
   17684 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17685 			/* LV 1G/100 packet drop issue workaround */
   17686 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17687 			    &phyreg);
   17688 			if (rv != 0)
   17689 				return rv;
   17690 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17691 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17692 			    phyreg);
   17693 			if (rv != 0)
   17694 				return rv;
   17695 		} else {
   17696 			/* For 10Mbps */
   17697 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17698 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17699 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17700 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17701 		}
   17702 	}
   17703 
   17704 	return 0;
   17705 }
   17706 
   17707 /*
   17708  *  wm_link_stall_workaround_hv - Si workaround
   17709  *  @sc: pointer to the HW structure
   17710  *
   17711  *  This function works around a Si bug where the link partner can get
   17712  *  a link up indication before the PHY does. If small packets are sent
   17713  *  by the link partner they can be placed in the packet buffer without
    17714  *  being properly accounted for by the PHY and will stall, preventing
   17715  *  further packets from being received.  The workaround is to clear the
   17716  *  packet buffer after the PHY detects link up.
   17717  */
   17718 static int
   17719 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17720 {
   17721 	uint16_t phyreg;
   17722 
   17723 	if (sc->sc_phytype != WMPHY_82578)
   17724 		return 0;
   17725 
    17726 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17727 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17728 	if ((phyreg & BMCR_LOOP) != 0)
   17729 		return 0;
   17730 
   17731 	/* Check if link is up and at 1Gbps */
   17732 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17733 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17734 	    | BM_CS_STATUS_SPEED_MASK;
   17735 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17736 		| BM_CS_STATUS_SPEED_1000))
   17737 		return 0;
   17738 
   17739 	delay(200 * 1000);	/* XXX too big */
   17740 
   17741 	/* Flush the packets in the fifo buffer */
   17742 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17743 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17744 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17745 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17746 
   17747 	return 0;
   17748 }
   17749 
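          /*
           *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
           *  @sc: pointer to the HW structure
           *
           *  Some PCH PHYs require slow MDIO mode before any other MDIO access;
           *  acquire the PHY semaphore and set the mode.
           */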
   17750 static int
   17751 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17752 {
   17753 	int rv;
   17754 
   17755 	rv = sc->phy.acquire(sc);
   17756 	if (rv != 0) {
   17757 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17758 		    __func__);
   17759 		return rv;
   17760 	}
   17761 
   17762 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17763 
   17764 	sc->phy.release(sc);
   17765 
   17766 	return rv;
   17767 }
   17768 
   17769 static int
   17770 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17771 {
   17772 	int rv;
   17773 	uint16_t reg;
   17774 
   17775 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17776 	if (rv != 0)
   17777 		return rv;
   17778 
   17779 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17780 	    reg | HV_KMRN_MDIO_SLOW);
   17781 }
   17782 
   17783 /*
   17784  *  wm_configure_k1_ich8lan - Configure K1 power state
   17785  *  @sc: pointer to the HW structure
   17786  *  @enable: K1 state to configure
   17787  *
   17788  *  Configure the K1 power state based on the provided parameter.
   17789  *  Assumes semaphore already acquired.
   17790  */
   17791 static void
   17792 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17793 {
   17794 	uint32_t ctrl, ctrl_ext, tmp;
   17795 	uint16_t kmreg;
   17796 	int rv;
   17797 
   17798 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17799 
   17800 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17801 	if (rv != 0)
   17802 		return;
   17803 
   17804 	if (k1_enable)
   17805 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17806 	else
   17807 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17808 
   17809 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17810 	if (rv != 0)
   17811 		return;
   17812 
   17813 	delay(20);
   17814 
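          	/*
          	 * Briefly force the MAC speed with the speed-select bypass so
          	 * the K1 change takes effect, then restore CTRL and CTRL_EXT.
          	 */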
   17815 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17816 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17817 
   17818 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17819 	tmp |= CTRL_FRCSPD;
   17820 
   17821 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17822 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17823 	CSR_WRITE_FLUSH(sc);
   17824 	delay(20);
   17825 
   17826 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17827 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17828 	CSR_WRITE_FLUSH(sc);
   17829 	delay(20);
   17830 
   17831 	return;
   17832 }
   17833 
   17834 /* special case - for 82575 - need to do manual init ... */
   17835 static void
   17836 wm_reset_init_script_82575(struct wm_softc *sc)
   17837 {
   17838 	/*
    17839 	 * Remark: this is untested code - we have no board without EEPROM.
    17840 	 * The setup matches the one in the FreeBSD driver for the i82575.
   17841 	 */
   17842 
   17843 	/* SerDes configuration via SERDESCTRL */
   17844 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17845 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17846 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17847 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17848 
   17849 	/* CCM configuration via CCMCTL register */
   17850 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17851 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17852 
   17853 	/* PCIe lanes configuration */
   17854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17858 
   17859 	/* PCIe PLL Configuration */
   17860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17862 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17863 }
   17864 
   17865 static void
   17866 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17867 {
   17868 	uint32_t reg;
   17869 	uint16_t nvmword;
   17870 	int rv;
   17871 
   17872 	if (sc->sc_type != WM_T_82580)
   17873 		return;
   17874 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17875 		return;
   17876 
   17877 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17878 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17879 	if (rv != 0) {
   17880 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17881 		    __func__);
   17882 		return;
   17883 	}
   17884 
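          	/* Re-apply the external/combined MDIO settings from the NVM */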
   17885 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17886 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17887 		reg |= MDICNFG_DEST;
   17888 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17889 		reg |= MDICNFG_COM_MDIO;
   17890 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17891 }
   17892 
   17893 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17894 
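          /*
           *  wm_phy_is_accessible_pchlan - Check if the PHY is accessible
           *  @sc: pointer to the HW structure
           *
           *  Try to read the PHY ID registers, retrying in slow MDIO mode if
           *  needed.  On PCH_LPT and newer, also unforce SMBus mode once the
           *  PHY responds and the ME firmware is not active.
           */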
   17895 static bool
   17896 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17897 {
   17898 	uint32_t reg;
   17899 	uint16_t id1, id2;
   17900 	int i, rv;
   17901 
   17902 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17903 		device_xname(sc->sc_dev), __func__));
   17904 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17905 
   17906 	id1 = id2 = 0xffff;
   17907 	for (i = 0; i < 2; i++) {
   17908 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17909 		    &id1);
   17910 		if ((rv != 0) || MII_INVALIDID(id1))
   17911 			continue;
   17912 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17913 		    &id2);
   17914 		if ((rv != 0) || MII_INVALIDID(id2))
   17915 			continue;
   17916 		break;
   17917 	}
   17918 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17919 		goto out;
   17920 
   17921 	/*
   17922 	 * In case the PHY needs to be in mdio slow mode,
   17923 	 * set slow mode and try to get the PHY id again.
   17924 	 */
   17925 	rv = 0;
   17926 	if (sc->sc_type < WM_T_PCH_LPT) {
   17927 		wm_set_mdio_slow_mode_hv_locked(sc);
   17928 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17929 		    &id1);
   17930 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17931 		    &id2);
   17932 	}
   17933 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17934 		device_printf(sc->sc_dev, "XXX return with false\n");
   17935 		return false;
   17936 	}
   17937 out:
   17938 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17939 		/* Only unforce SMBus if ME is not active */
   17940 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17941 			uint16_t phyreg;
   17942 
   17943 			/* Unforce SMBus mode in PHY */
   17944 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17945 			    CV_SMB_CTRL, &phyreg);
   17946 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17947 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17948 			    CV_SMB_CTRL, phyreg);
   17949 
   17950 			/* Unforce SMBus mode in MAC */
   17951 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17952 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17953 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17954 		}
   17955 	}
   17956 	return true;
   17957 }
   17958 
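          /*
           *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
           *  @sc: pointer to the HW structure
           *
           *  Toggling the LANPHYPC pin value power-cycles the PHY; wait for
           *  the PHY configuration to complete afterwards.
           */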
   17959 static void
   17960 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17961 {
   17962 	uint32_t reg;
   17963 	int i;
   17964 
   17965 	/* Set PHY Config Counter to 50msec */
   17966 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17967 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17968 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17969 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17970 
   17971 	/* Toggle LANPHYPC */
   17972 	reg = CSR_READ(sc, WMREG_CTRL);
   17973 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17974 	reg &= ~CTRL_LANPHYPC_VALUE;
   17975 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17976 	CSR_WRITE_FLUSH(sc);
   17977 	delay(1000);
   17978 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17979 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17980 	CSR_WRITE_FLUSH(sc);
   17981 
   17982 	if (sc->sc_type < WM_T_PCH_LPT)
   17983 		delay(50 * 1000);
   17984 	else {
   17985 		i = 20;
   17986 
   17987 		do {
   17988 			delay(5 * 1000);
   17989 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17990 		    && i--);
   17991 
   17992 		delay(30 * 1000);
   17993 	}
   17994 }
   17995 
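          /*
           *  wm_platform_pm_pch_lpt - Set platform power management values
           *  @sc:   pointer to the HW structure
           *  @link: link up bool flag
           *
           *  Set the Latency Tolerance Reporting (LTR) values and the OBFF
           *  high water mark based on the Rx buffer size and the link speed
           *  when link is up; otherwise report that there is no latency
           *  requirement.
           */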
   17996 static int
   17997 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17998 {
   17999 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   18000 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   18001 	uint32_t rxa;
   18002 	uint16_t scale = 0, lat_enc = 0;
   18003 	int32_t obff_hwm = 0;
   18004 	int64_t lat_ns, value;
   18005 
   18006 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18007 		device_xname(sc->sc_dev), __func__));
   18008 
   18009 	if (link) {
   18010 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   18011 		uint32_t status;
   18012 		uint16_t speed;
   18013 		pcireg_t preg;
   18014 
   18015 		status = CSR_READ(sc, WMREG_STATUS);
   18016 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   18017 		case STATUS_SPEED_10:
   18018 			speed = 10;
   18019 			break;
   18020 		case STATUS_SPEED_100:
   18021 			speed = 100;
   18022 			break;
   18023 		case STATUS_SPEED_1000:
   18024 			speed = 1000;
   18025 			break;
   18026 		default:
   18027 			device_printf(sc->sc_dev, "Unknown speed "
   18028 			    "(status = %08x)\n", status);
   18029 			return -1;
   18030 		}
   18031 
   18032 		/* Rx Packet Buffer Allocation size (KB) */
   18033 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   18034 
   18035 		/*
   18036 		 * Determine the maximum latency tolerated by the device.
   18037 		 *
   18038 		 * Per the PCIe spec, the tolerated latencies are encoded as
   18039 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   18040 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   18041 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   18042 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   18043 		 */
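          		/*
          		 * rxa is in KB.  The buffer size minus two maximum-sized
          		 * frames, in bits, divided by the speed in Mb/s gives
          		 * microseconds; the factor of 1000 converts that to ns.
          		 */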
   18044 		lat_ns = ((int64_t)rxa * 1024 -
   18045 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   18046 			+ ETHER_HDR_LEN))) * 8 * 1000;
   18047 		if (lat_ns < 0)
   18048 			lat_ns = 0;
   18049 		else
   18050 			lat_ns /= speed;
   18051 		value = lat_ns;
   18052 
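          		/*
          		 * Divide by 2^5 until the value fits in the 10-bit
          		 * LTR value field, bumping the scale for each division.
          		 */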
   18053 		while (value > LTRV_VALUE) {
    18054 			scale++;
   18055 			value = howmany(value, __BIT(5));
   18056 		}
   18057 		if (scale > LTRV_SCALE_MAX) {
   18058 			device_printf(sc->sc_dev,
   18059 			    "Invalid LTR latency scale %d\n", scale);
   18060 			return -1;
   18061 		}
   18062 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   18063 
   18064 		/* Determine the maximum latency tolerated by the platform */
   18065 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18066 		    WM_PCI_LTR_CAP_LPT);
   18067 		max_snoop = preg & 0xffff;
   18068 		max_nosnoop = preg >> 16;
   18069 
   18070 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   18071 
   18072 		if (lat_enc > max_ltr_enc) {
   18073 			lat_enc = max_ltr_enc;
   18074 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   18075 			    * PCI_LTR_SCALETONS(
   18076 				    __SHIFTOUT(lat_enc,
   18077 					PCI_LTR_MAXSNOOPLAT_SCALE));
   18078 		}
   18079 
   18080 		if (lat_ns) {
   18081 			lat_ns *= speed * 1000;
   18082 			lat_ns /= 8;
   18083 			lat_ns /= 1000000000;
   18084 			obff_hwm = (int32_t)(rxa - lat_ns);
   18085 		}
   18086 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    18087 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    18088 			    " (rxa = %d, lat_ns = %d)\n",
   18089 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   18090 			return -1;
   18091 		}
   18092 	}
   18093 	/* Snoop and No-Snoop latencies the same */
   18094 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   18095 	CSR_WRITE(sc, WMREG_LTRV, reg);
   18096 
   18097 	/* Set OBFF high water mark */
   18098 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   18099 	reg |= obff_hwm;
   18100 	CSR_WRITE(sc, WMREG_SVT, reg);
   18101 
   18102 	/* Enable OBFF */
   18103 	reg = CSR_READ(sc, WMREG_SVCR);
   18104 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   18105 	CSR_WRITE(sc, WMREG_SVCR, reg);
   18106 
   18107 	return 0;
   18108 }
   18109 
   18110 /*
   18111  * I210 Errata 25 and I211 Errata 10
   18112  * Slow System Clock.
   18113  *
    18114  * On NetBSD, this function is called in both the FLASH and iNVM cases.
   18115  */
   18116 static int
   18117 wm_pll_workaround_i210(struct wm_softc *sc)
   18118 {
   18119 	uint32_t mdicnfg, wuc;
   18120 	uint32_t reg;
   18121 	pcireg_t pcireg;
   18122 	uint32_t pmreg;
   18123 	uint16_t nvmword, tmp_nvmword;
   18124 	uint16_t phyval;
   18125 	bool wa_done = false;
   18126 	int i, rv = 0;
   18127 
   18128 	/* Get Power Management cap offset */
   18129 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   18130 	    &pmreg, NULL) == 0)
   18131 		return -1;
   18132 
   18133 	/* Save WUC and MDICNFG registers */
   18134 	wuc = CSR_READ(sc, WMREG_WUC);
   18135 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   18136 
   18137 	reg = mdicnfg & ~MDICNFG_DEST;
   18138 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   18139 
   18140 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   18141 		/*
   18142 		 * The default value of the Initialization Control Word 1
   18143 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   18144 		 */
   18145 		nvmword = INVM_DEFAULT_AL;
   18146 	}
   18147 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   18148 
   18149 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   18150 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   18151 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   18152 
   18153 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   18154 			rv = 0;
   18155 			break; /* OK */
   18156 		} else
   18157 			rv = -1;
   18158 
   18159 		wa_done = true;
   18160 		/* Directly reset the internal PHY */
   18161 		reg = CSR_READ(sc, WMREG_CTRL);
   18162 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   18163 
   18164 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   18165 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   18166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   18167 
   18168 		CSR_WRITE(sc, WMREG_WUC, 0);
   18169 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   18170 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18171 
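          		/*
          		 * Bounce the function through D3hot and back to D0 so
          		 * that the autoload word written above is re-read.
          		 */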
   18172 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   18173 		    pmreg + PCI_PMCSR);
   18174 		pcireg |= PCI_PMCSR_STATE_D3;
   18175 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18176 		    pmreg + PCI_PMCSR, pcireg);
   18177 		delay(1000);
   18178 		pcireg &= ~PCI_PMCSR_STATE_D3;
   18179 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   18180 		    pmreg + PCI_PMCSR, pcireg);
   18181 
   18182 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   18183 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   18184 
   18185 		/* Restore WUC register */
   18186 		CSR_WRITE(sc, WMREG_WUC, wuc);
   18187 	}
   18188 
   18189 	/* Restore MDICNFG setting */
   18190 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   18191 	if (wa_done)
   18192 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   18193 	return rv;
   18194 }
   18195 
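          /*
           *  wm_legacy_irq_quirk_spt - Quirk for legacy (INTx) interrupts
           *  @sc: pointer to the HW structure
           *
           *  Ungate the side clock and disable IOSF sideband clock gating and
           *  clock requests, which is needed when the device is used with
           *  legacy interrupts on SPT and newer PCHs.
           */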
   18196 static void
   18197 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   18198 {
   18199 	uint32_t reg;
   18200 
   18201 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   18202 		device_xname(sc->sc_dev), __func__));
   18203 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   18204 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
   18205 
   18206 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   18207 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   18208 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   18209 
   18210 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   18211 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   18212 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   18213 }
   18214 
   18215 /* Sysctl functions */
   18216 static int
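          /* Report the current Tx descriptor head (TDH) of a queue */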
   18217 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   18218 {
   18219 	struct sysctlnode node = *rnode;
   18220 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18221 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18222 	struct wm_softc *sc = txq->txq_sc;
   18223 	uint32_t reg;
   18224 
   18225 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   18226 	node.sysctl_data = &reg;
   18227 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18228 }
   18229 
   18230 static int
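          /* Report the current Tx descriptor tail (TDT) of a queue */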
   18231 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   18232 {
   18233 	struct sysctlnode node = *rnode;
   18234 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   18235 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   18236 	struct wm_softc *sc = txq->txq_sc;
   18237 	uint32_t reg;
   18238 
   18239 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   18240 	node.sysctl_data = &reg;
   18241 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   18242 }
   18243 
   18244 #ifdef WM_DEBUG
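          /* Read or set the debug flag bits; dump TARC0 and TDT0 on update */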
   18245 static int
   18246 wm_sysctl_debug(SYSCTLFN_ARGS)
   18247 {
   18248 	struct sysctlnode node = *rnode;
   18249 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   18250 	uint32_t dflags;
   18251 	int error;
   18252 
   18253 	dflags = sc->sc_debug;
   18254 	node.sysctl_data = &dflags;
   18255 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   18256 
   18257 	if (error || newp == NULL)
   18258 		return error;
   18259 
   18260 	sc->sc_debug = dflags;
   18261 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   18262 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   18263 
   18264 	return 0;
   18265 }
   18266 #endif
   18267