Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.765
      1 /*	$NetBSD: if_wm.c,v 1.765 2022/10/19 06:37:25 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.765 2022/10/19 06:37:25 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_if_wm.h"
     89 #endif
     90 
     91 #include <sys/param.h>
     92 
     93 #include <sys/atomic.h>
     94 #include <sys/callout.h>
     95 #include <sys/cpu.h>
     96 #include <sys/device.h>
     97 #include <sys/errno.h>
     98 #include <sys/interrupt.h>
     99 #include <sys/ioctl.h>
    100 #include <sys/kernel.h>
    101 #include <sys/kmem.h>
    102 #include <sys/mbuf.h>
    103 #include <sys/pcq.h>
    104 #include <sys/queue.h>
    105 #include <sys/rndsource.h>
    106 #include <sys/socket.h>
    107 #include <sys/sysctl.h>
    108 #include <sys/syslog.h>
    109 #include <sys/systm.h>
    110 #include <sys/workqueue.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/mdio.h>
    133 #include <dev/mii/miivar.h>
    134 #include <dev/mii/miidevs.h>
    135 #include <dev/mii/mii_bitbang.h>
    136 #include <dev/mii/ikphyreg.h>
    137 #include <dev/mii/igphyreg.h>
    138 #include <dev/mii/igphyvar.h>
    139 #include <dev/mii/inbmphyreg.h>
    140 #include <dev/mii/ihphyreg.h>
    141 #include <dev/mii/makphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 
    160 #if 0
    161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    163 	WM_DEBUG_LOCK
    164 #endif
    165 
    166 #define	DPRINTF(sc, x, y)			  \
    167 	do {					  \
    168 		if ((sc)->sc_debug & (x))	  \
    169 			printf y;		  \
    170 	} while (0)
    171 #else
    172 #define	DPRINTF(sc, x, y)	__nothing
    173 #endif /* WM_DEBUG */
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
    178  * This device driver's max interrupt numbers.
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
    207  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    208  * m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
    237  * Receive descriptor list size.  We have one Rx buffer for normal
    238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    239  * packet.  We allocate 256 receive descriptors, each with a 2k
    240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    305 #if !defined(WM_EVENT_COUNTERS)
    306 #define WM_EVENT_COUNTERS 1
    307 #endif
    308 #endif
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
    312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    313 	struct evcnt qname##_ev_##evname
    314 
    315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    316 	do {								\
    317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    319 		    "%s%02d%s", #qname, (qnum), #evname);		\
    320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    321 		    (evtype), NULL, (xname),				\
    322 		    (q)->qname##_##evname##_evcnt_name);		\
    323 	} while (0)
    324 
    325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    327 
    328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    330 
    331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    332 	evcnt_detach(&(q)->qname##_ev_##evname)
    333 #endif /* WM_EVENT_COUNTERS */
    334 
    335 struct wm_txqueue {
    336 	kmutex_t *txq_lock;		/* lock for tx operations */
    337 
    338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    339 
    340 	/* Software state for the transmit descriptors. */
    341 	int txq_num;			/* must be a power of two */
    342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    343 
    344 	/* TX control data structures. */
    345 	int txq_ndesc;			/* must be a power of two */
    346 	size_t txq_descsize;		/* a tx descriptor size */
    347 	txdescs_t *txq_descs_u;
    348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    350 	int txq_desc_rseg;		/* real number of control segment */
    351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    352 #define	txq_descs	txq_descs_u->sctxu_txdescs
    353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    354 
    355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    356 
    357 	int txq_free;			/* number of free Tx descriptors */
    358 	int txq_next;			/* next ready Tx descriptor */
    359 
    360 	int txq_sfree;			/* number of free Tx jobs */
    361 	int txq_snext;			/* next free Tx job */
    362 	int txq_sdirty;			/* dirty Tx jobs */
    363 
    364 	/* These 4 variables are used only on the 82547. */
    365 	int txq_fifo_size;		/* Tx FIFO size */
    366 	int txq_fifo_head;		/* current head of FIFO */
    367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    369 
    370 	/*
    371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    372 	 * CPUs. This queue intermediate them without block.
    373 	 */
    374 	pcq_t *txq_interq;
    375 
    376 	/*
    377 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    378 	 * to manage Tx H/W queue's busy flag.
    379 	 */
    380 	int txq_flags;			/* flags for H/W queue, see below */
    381 #define	WM_TXQ_NO_SPACE		0x1
    382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    383 
    384 	bool txq_stopping;
    385 
    386 	bool txq_sending;
    387 	time_t txq_lastsent;
    388 
    389 	/* Checksum flags used for previous packet */
    390 	uint32_t	txq_last_hw_cmd;
    391 	uint8_t		txq_last_hw_fields;
    392 	uint16_t	txq_last_hw_ipcs;
    393 	uint16_t	txq_last_hw_tucs;
    394 
    395 	uint32_t txq_packets;		/* for AIM */
    396 	uint32_t txq_bytes;		/* for AIM */
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* TX event counters */
    399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
    400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
    401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
    402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
    403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
    404 					    /* XXX not used? */
    405 
    406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
    407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
    408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
    409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
    410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
    411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
    412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
    413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
    414 					    /* other than toomanyseg */
    415 
    416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
    417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
    418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
    419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
    420 
    421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    423 #endif /* WM_EVENT_COUNTERS */
    424 };
    425 
    426 struct wm_rxqueue {
    427 	kmutex_t *rxq_lock;		/* lock for rx operations */
    428 
    429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    430 
    431 	/* Software state for the receive descriptors. */
    432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    433 
    434 	/* RX control data structures. */
    435 	int rxq_ndesc;			/* must be a power of two */
    436 	size_t rxq_descsize;		/* a rx descriptor size */
    437 	rxdescs_t *rxq_descs_u;
    438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    440 	int rxq_desc_rseg;		/* real number of control segment */
    441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    445 
    446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    447 
    448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    449 	int rxq_discard;
    450 	int rxq_len;
    451 	struct mbuf *rxq_head;
    452 	struct mbuf *rxq_tail;
    453 	struct mbuf **rxq_tailp;
    454 
    455 	bool rxq_stopping;
    456 
    457 	uint32_t rxq_packets;		/* for AIM */
    458 	uint32_t rxq_bytes;		/* for AIM */
    459 #ifdef WM_EVENT_COUNTERS
    460 	/* RX event counters */
    461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    463 
    464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    466 #endif
    467 };
    468 
    469 struct wm_queue {
    470 	int wmq_id;			/* index of TX/RX queues */
    471 	int wmq_intr_idx;		/* index of MSI-X tables */
    472 
    473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    474 	bool wmq_set_itr;
    475 
    476 	struct wm_txqueue wmq_txq;
    477 	struct wm_rxqueue wmq_rxq;
    478 	char sysctlname[32];		/* Name for sysctl */
    479 
    480 	bool wmq_txrx_use_workqueue;
    481 	struct work wmq_cookie;
    482 	void *wmq_si;
    483 };
    484 
    485 struct wm_phyop {
    486 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    487 	void (*release)(struct wm_softc *);
    488 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    489 	int (*writereg_locked)(device_t, int, int, uint16_t);
    490 	int reset_delay_us;
    491 	bool no_errprint;
    492 };
    493 
    494 struct wm_nvmop {
    495 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    496 	void (*release)(struct wm_softc *);
    497 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    498 };
    499 
    500 /*
    501  * Software state per device.
    502  */
    503 struct wm_softc {
    504 	device_t sc_dev;		/* generic device information */
    505 	bus_space_tag_t sc_st;		/* bus space tag */
    506 	bus_space_handle_t sc_sh;	/* bus space handle */
    507 	bus_size_t sc_ss;		/* bus space size */
    508 	bus_space_tag_t sc_iot;		/* I/O space tag */
    509 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    510 	bus_size_t sc_ios;		/* I/O space size */
    511 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    512 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    513 	bus_size_t sc_flashs;		/* flash registers space size */
    514 	off_t sc_flashreg_offset;	/*
    515 					 * offset to flash registers from
    516 					 * start of BAR
    517 					 */
    518 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    519 
    520 	struct ethercom sc_ethercom;	/* Ethernet common data */
    521 	struct mii_data sc_mii;		/* MII/media information */
    522 
    523 	pci_chipset_tag_t sc_pc;
    524 	pcitag_t sc_pcitag;
    525 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    526 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    527 
    528 	uint16_t sc_pcidevid;		/* PCI device ID */
    529 	wm_chip_type sc_type;		/* MAC type */
    530 	int sc_rev;			/* MAC revision */
    531 	wm_phy_type sc_phytype;		/* PHY type */
    532 	uint8_t sc_sfptype;		/* SFP type */
    533 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    534 #define	WM_MEDIATYPE_UNKNOWN		0x00
    535 #define	WM_MEDIATYPE_FIBER		0x01
    536 #define	WM_MEDIATYPE_COPPER		0x02
    537 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    538 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    539 	int sc_flags;			/* flags; see below */
    540 	u_short sc_if_flags;		/* last if_flags */
    541 	int sc_ec_capenable;		/* last ec_capenable */
    542 	int sc_flowflags;		/* 802.3x flow control flags */
    543 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    544 	int sc_align_tweak;
    545 
    546 	void *sc_ihs[WM_MAX_NINTR];	/*
    547 					 * interrupt cookie.
    548 					 * - legacy and msi use sc_ihs[0] only
    549 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    550 					 */
    551 	pci_intr_handle_t *sc_intrs;	/*
    552 					 * legacy and msi use sc_intrs[0] only
    553 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    554 					 */
    555 	int sc_nintrs;			/* number of interrupts */
    556 
    557 	int sc_link_intr_idx;		/* index of MSI-X tables */
    558 
    559 	callout_t sc_tick_ch;		/* tick callout */
    560 	bool sc_core_stopping;
    561 
    562 	int sc_nvm_ver_major;
    563 	int sc_nvm_ver_minor;
    564 	int sc_nvm_ver_build;
    565 	int sc_nvm_addrbits;		/* NVM address bits */
    566 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    567 	int sc_ich8_flash_base;
    568 	int sc_ich8_flash_bank_size;
    569 	int sc_nvm_k1_enabled;
    570 
    571 	int sc_nqueues;
    572 	struct wm_queue *sc_queue;
    573 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    574 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    575 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    576 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    577 	struct workqueue *sc_queue_wq;
    578 	bool sc_txrx_use_workqueue;
    579 
    580 	int sc_affinity_offset;
    581 
    582 #ifdef WM_EVENT_COUNTERS
    583 	/* Event counters. */
    584 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    585 
    586 	/* >= WM_T_82542_2_1 */
    587 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    588 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    589 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    590 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    591 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    592 
    593 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
    594 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
    595 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
    596 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
    597 	struct evcnt sc_ev_mpc;		/* Missed Packets */
    598 	struct evcnt sc_ev_colc;	/* Collision */
    599 	struct evcnt sc_ev_sec;		/* Sequence Error */
    600 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
    601 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
    602 	struct evcnt sc_ev_scc;		/* Single Collision */
    603 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
    604 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
    605 	struct evcnt sc_ev_latecol;	/* Late Collision */
    606 	struct evcnt sc_ev_dc;		/* Defer */
    607 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
    608 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
    609 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
    610 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
    611 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
    612 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
    613 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
    614 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
    615 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
    616 	struct evcnt sc_ev_roc;		/* Rx Oversize */
    617 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
    618 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
    619 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
    620 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
    621 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
    622 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
    623 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
    624 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
    625 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
    626 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
    627 	struct evcnt sc_ev_prc511;	/* Packets Rx (255-511 bytes) */
    628 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
    629 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
    630 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
    631 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
    632 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
    633 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
    634 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
    635 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
    636 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
    637 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
    638 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
    639 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
    640 	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
    641 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
    642 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
    643 	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
    644 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
    645 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
    646 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
    647 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
    648 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
    649 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
    650 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
    651 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
    652 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
    653 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
    654 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
    655 
    656 #endif /* WM_EVENT_COUNTERS */
    657 
    658 	struct sysctllog *sc_sysctllog;
    659 
    660 	/* This variable are used only on the 82547. */
    661 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    662 
    663 	uint32_t sc_ctrl;		/* prototype CTRL register */
    664 #if 0
    665 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    666 #endif
    667 	uint32_t sc_icr;		/* prototype interrupt bits */
    668 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    669 	uint32_t sc_tctl;		/* prototype TCTL register */
    670 	uint32_t sc_rctl;		/* prototype RCTL register */
    671 	uint32_t sc_txcw;		/* prototype TXCW register */
    672 	uint32_t sc_tipg;		/* prototype TIPG register */
    673 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    674 	uint32_t sc_pba;		/* prototype PBA register */
    675 
    676 	int sc_tbi_linkup;		/* TBI link status */
    677 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    678 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    679 
    680 	int sc_mchash_type;		/* multicast filter offset */
    681 
    682 	krndsource_t rnd_source;	/* random source */
    683 
    684 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    685 
    686 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    687 	kmutex_t *sc_ich_phymtx;	/*
    688 					 * 82574/82583/ICH/PCH specific PHY
    689 					 * mutex. For 82574/82583, the mutex
    690 					 * is used for both PHY and NVM.
    691 					 */
    692 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    693 
    694 	struct wm_phyop phy;
    695 	struct wm_nvmop nvm;
    696 
    697 	struct workqueue *sc_reset_wq;
    698 	struct work sc_reset_work;
    699 	volatile unsigned sc_reset_pending;
    700 
    701 	bool sc_dying;
    702 
    703 #ifdef WM_DEBUG
    704 	uint32_t sc_debug;
    705 	bool sc_trigger_reset;
    706 #endif
    707 };
    708 
    709 #define	WM_RXCHAIN_RESET(rxq)						\
    710 do {									\
    711 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    712 	*(rxq)->rxq_tailp = NULL;					\
    713 	(rxq)->rxq_len = 0;						\
    714 } while (/*CONSTCOND*/0)
    715 
    716 #define	WM_RXCHAIN_LINK(rxq, m)						\
    717 do {									\
    718 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    719 	(rxq)->rxq_tailp = &(m)->m_next;				\
    720 } while (/*CONSTCOND*/0)
    721 
    722 #ifdef WM_EVENT_COUNTERS
    723 #ifdef __HAVE_ATOMIC64_LOADSTORE
    724 #define	WM_EVCNT_INCR(ev)						\
    725 	atomic_store_relaxed(&((ev)->ev_count),				\
    726 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    727 #define	WM_EVCNT_ADD(ev, val)						\
    728 	atomic_store_relaxed(&((ev)->ev_count),				\
    729 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    730 #else
    731 #define	WM_EVCNT_INCR(ev)						\
    732 	((ev)->ev_count)++
    733 #define	WM_EVCNT_ADD(ev, val)						\
    734 	(ev)->ev_count += (val)
    735 #endif
    736 
    737 #define WM_Q_EVCNT_INCR(qname, evname)			\
    738 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    739 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    740 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    741 #else /* !WM_EVENT_COUNTERS */
    742 #define	WM_EVCNT_INCR(ev)	/* nothing */
    743 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    744 
    745 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    746 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    747 #endif /* !WM_EVENT_COUNTERS */
    748 
    749 #define	CSR_READ(sc, reg)						\
    750 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    751 #define	CSR_WRITE(sc, reg, val)						\
    752 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    753 #define	CSR_WRITE_FLUSH(sc)						\
    754 	(void)CSR_READ((sc), WMREG_STATUS)
    755 
    756 #define ICH8_FLASH_READ32(sc, reg)					\
    757 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    758 	    (reg) + sc->sc_flashreg_offset)
    759 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    760 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    761 	    (reg) + sc->sc_flashreg_offset, (data))
    762 
    763 #define ICH8_FLASH_READ16(sc, reg)					\
    764 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    765 	    (reg) + sc->sc_flashreg_offset)
    766 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    767 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    768 	    (reg) + sc->sc_flashreg_offset, (data))
    769 
    770 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    771 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    772 
    773 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    774 #define	WM_CDTXADDR_HI(txq, x)						\
    775 	(sizeof(bus_addr_t) == 8 ?					\
    776 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    777 
    778 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    779 #define	WM_CDRXADDR_HI(rxq, x)						\
    780 	(sizeof(bus_addr_t) == 8 ?					\
    781 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    782 
    783 /*
    784  * Register read/write functions.
    785  * Other than CSR_{READ|WRITE}().
    786  */
    787 #if 0
    788 static inline uint32_t wm_io_read(struct wm_softc *, int);
    789 #endif
    790 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    791 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    792     uint32_t, uint32_t);
    793 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    794 
    795 /*
    796  * Descriptor sync/init functions.
    797  */
    798 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    799 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    800 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    801 
    802 /*
    803  * Device driver interface functions and commonly used functions.
    804  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    805  */
    806 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    807 static int	wm_match(device_t, cfdata_t, void *);
    808 static void	wm_attach(device_t, device_t, void *);
    809 static int	wm_detach(device_t, int);
    810 static bool	wm_suspend(device_t, const pmf_qual_t *);
    811 static bool	wm_resume(device_t, const pmf_qual_t *);
    812 static bool	wm_watchdog(struct ifnet *);
    813 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    814     uint16_t *);
    815 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    816     uint16_t *);
    817 static void	wm_tick(void *);
    818 static int	wm_ifflags_cb(struct ethercom *);
    819 static int	wm_ioctl(struct ifnet *, u_long, void *);
    820 /* MAC address related */
    821 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    822 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    823 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    824 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    825 static int	wm_rar_count(struct wm_softc *);
    826 static void	wm_set_filter(struct wm_softc *);
    827 /* Reset and init related */
    828 static void	wm_set_vlan(struct wm_softc *);
    829 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    830 static void	wm_get_auto_rd_done(struct wm_softc *);
    831 static void	wm_lan_init_done(struct wm_softc *);
    832 static void	wm_get_cfg_done(struct wm_softc *);
    833 static int	wm_phy_post_reset(struct wm_softc *);
    834 static int	wm_write_smbus_addr(struct wm_softc *);
    835 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    836 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    837 static void	wm_initialize_hardware_bits(struct wm_softc *);
    838 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    839 static int	wm_reset_phy(struct wm_softc *);
    840 static void	wm_flush_desc_rings(struct wm_softc *);
    841 static void	wm_reset(struct wm_softc *);
    842 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    843 static void	wm_rxdrain(struct wm_rxqueue *);
    844 static void	wm_init_rss(struct wm_softc *);
    845 static void	wm_adjust_qnum(struct wm_softc *, int);
    846 static inline bool	wm_is_using_msix(struct wm_softc *);
    847 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    848 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    849 static int	wm_setup_legacy(struct wm_softc *);
    850 static int	wm_setup_msix(struct wm_softc *);
    851 static int	wm_init(struct ifnet *);
    852 static int	wm_init_locked(struct ifnet *);
    853 static void	wm_init_sysctls(struct wm_softc *);
    854 static void	wm_unset_stopping_flags(struct wm_softc *);
    855 static void	wm_set_stopping_flags(struct wm_softc *);
    856 static void	wm_stop(struct ifnet *, int);
    857 static void	wm_stop_locked(struct ifnet *, bool, bool);
    858 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    859 static void	wm_82547_txfifo_stall(void *);
    860 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    861 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    862 /* DMA related */
    863 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    864 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    865 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    866 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    867     struct wm_txqueue *);
    868 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    869 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    870 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    871     struct wm_rxqueue *);
    872 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    873 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    874 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    875 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    876 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    877 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    878 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    879     struct wm_txqueue *);
    880 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    881     struct wm_rxqueue *);
    882 static int	wm_alloc_txrx_queues(struct wm_softc *);
    883 static void	wm_free_txrx_queues(struct wm_softc *);
    884 static int	wm_init_txrx_queues(struct wm_softc *);
    885 /* Start */
    886 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    887     struct wm_txsoft *, uint32_t *, uint8_t *);
    888 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    889 static void	wm_start(struct ifnet *);
    890 static void	wm_start_locked(struct ifnet *);
    891 static int	wm_transmit(struct ifnet *, struct mbuf *);
    892 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    893 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    894     bool);
    895 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    896     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    897 static void	wm_nq_start(struct ifnet *);
    898 static void	wm_nq_start_locked(struct ifnet *);
    899 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    900 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    901 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    902     bool);
    903 static void	wm_deferred_start_locked(struct wm_txqueue *);
    904 static void	wm_handle_queue(void *);
    905 static void	wm_handle_queue_work(struct work *, void *);
    906 static void	wm_handle_reset_work(struct work *, void *);
    907 /* Interrupt */
    908 static bool	wm_txeof(struct wm_txqueue *, u_int);
    909 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    910 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    911 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    912 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    913 static void	wm_linkintr(struct wm_softc *, uint32_t);
    914 static int	wm_intr_legacy(void *);
    915 static inline void	wm_txrxintr_disable(struct wm_queue *);
    916 static inline void	wm_txrxintr_enable(struct wm_queue *);
    917 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    918 static int	wm_txrxintr_msix(void *);
    919 static int	wm_linkintr_msix(void *);
    920 
    921 /*
    922  * Media related.
    923  * GMII, SGMII, TBI, SERDES and SFP.
    924  */
    925 /* Common */
    926 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    927 /* GMII related */
    928 static void	wm_gmii_reset(struct wm_softc *);
    929 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    930 static int	wm_get_phy_id_82575(struct wm_softc *);
    931 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    932 static int	wm_gmii_mediachange(struct ifnet *);
    933 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    934 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    935 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    936 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    937 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    938 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    939 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    940 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    941 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    942 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    943 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    944 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    945 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    946 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    947 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    948 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    949 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    950 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    951 	bool);
    952 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    953 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    954 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    955 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    956 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    957 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    958 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    959 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    960 static void	wm_gmii_statchg(struct ifnet *);
    961 /*
    962  * kumeran related (80003, ICH* and PCH*).
    963  * These functions are not for accessing MII registers but for accessing
    964  * kumeran specific registers.
    965  */
    966 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    967 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    968 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    969 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    970 /* EMI register related */
    971 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    972 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    973 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    974 /* SGMII */
    975 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    976 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    977 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    978 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    979 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    980 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    981 /* TBI related */
    982 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    983 static void	wm_tbi_mediainit(struct wm_softc *);
    984 static int	wm_tbi_mediachange(struct ifnet *);
    985 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    986 static int	wm_check_for_link(struct wm_softc *);
    987 static void	wm_tbi_tick(struct wm_softc *);
    988 /* SERDES related */
    989 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    990 static int	wm_serdes_mediachange(struct ifnet *);
    991 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    992 static void	wm_serdes_tick(struct wm_softc *);
    993 /* SFP related */
    994 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    995 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    996 
    997 /*
    998  * NVM related.
    999  * Microwire, SPI (w/wo EERD) and Flash.
   1000  */
   1001 /* Misc functions */
   1002 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
   1003 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
   1004 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
   1005 /* Microwire */
   1006 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
   1007 /* SPI */
   1008 static int	wm_nvm_ready_spi(struct wm_softc *);
   1009 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   1010 /* Using with EERD */
   1011 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
   1012 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
   1013 /* Flash */
   1014 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
   1015     unsigned int *);
   1016 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
   1017 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
   1018 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
   1019     uint32_t *);
   1020 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
   1021 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
   1022 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
   1023 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
   1024 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
   1025 /* iNVM */
   1026 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
   1027 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   1028 /* Lock, detecting NVM type, validate checksum and read */
   1029 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
   1030 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
   1031 static int	wm_nvm_validate_checksum(struct wm_softc *);
   1032 static void	wm_nvm_version_invm(struct wm_softc *);
   1033 static void	wm_nvm_version(struct wm_softc *);
   1034 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
   1035 
   1036 /*
   1037  * Hardware semaphores.
   1038  * Very complexed...
   1039  */
   1040 static int	wm_get_null(struct wm_softc *);
   1041 static void	wm_put_null(struct wm_softc *);
   1042 static int	wm_get_eecd(struct wm_softc *);
   1043 static void	wm_put_eecd(struct wm_softc *);
   1044 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
   1045 static void	wm_put_swsm_semaphore(struct wm_softc *);
   1046 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
   1047 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
   1048 static int	wm_get_nvm_80003(struct wm_softc *);
   1049 static void	wm_put_nvm_80003(struct wm_softc *);
   1050 static int	wm_get_nvm_82571(struct wm_softc *);
   1051 static void	wm_put_nvm_82571(struct wm_softc *);
   1052 static int	wm_get_phy_82575(struct wm_softc *);
   1053 static void	wm_put_phy_82575(struct wm_softc *);
   1054 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1055 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1056 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1057 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1058 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1059 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1060 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1061 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1062 
   1063 /*
   1064  * Management mode and power management related subroutines.
   1065  * BMC, AMT, suspend/resume and EEE.
   1066  */
   1067 #if 0
   1068 static int	wm_check_mng_mode(struct wm_softc *);
   1069 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1070 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1071 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1072 #endif
   1073 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1074 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1075 static void	wm_get_hw_control(struct wm_softc *);
   1076 static void	wm_release_hw_control(struct wm_softc *);
   1077 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1078 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1079 static void	wm_init_manageability(struct wm_softc *);
   1080 static void	wm_release_manageability(struct wm_softc *);
   1081 static void	wm_get_wakeup(struct wm_softc *);
   1082 static int	wm_ulp_disable(struct wm_softc *);
   1083 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1084 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1085 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1086 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1087 static void	wm_enable_wakeup(struct wm_softc *);
   1088 static void	wm_disable_aspm(struct wm_softc *);
   1089 /* LPLU (Low Power Link Up) */
   1090 static void	wm_lplu_d0_disable(struct wm_softc *);
   1091 /* EEE */
   1092 static int	wm_set_eee_i350(struct wm_softc *);
   1093 static int	wm_set_eee_pchlan(struct wm_softc *);
   1094 static int	wm_set_eee(struct wm_softc *);
   1095 
   1096 /*
   1097  * Workarounds (mainly PHY related).
   1098  * Basically, PHY's workarounds are in the PHY drivers.
   1099  */
   1100 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1101 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1102 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1103 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1104 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1105 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1106 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1107 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1108 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1109 static int	wm_k1_workaround_lv(struct wm_softc *);
   1110 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1111 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1112 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
   1113 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1114 static void	wm_reset_init_script_82575(struct wm_softc *);
   1115 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1116 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1117 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1118 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1119 static int	wm_pll_workaround_i210(struct wm_softc *);
   1120 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1121 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1122 static void	wm_set_linkdown_discard(struct wm_softc *);
   1123 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1124 
   1125 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1126 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1127 #ifdef WM_DEBUG
   1128 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1129 #endif
   1130 
   1131 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1132     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1133 
   1134 /*
   1135  * Devices supported by this driver.
   1136  */
   1137 static const struct wm_product {
   1138 	pci_vendor_id_t		wmp_vendor;
   1139 	pci_product_id_t	wmp_product;
   1140 	const char		*wmp_name;
   1141 	wm_chip_type		wmp_type;
   1142 	uint32_t		wmp_flags;
   1143 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1144 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1145 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1146 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1147 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1148 } wm_products[] = {
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1150 	  "Intel i82542 1000BASE-X Ethernet",
   1151 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1154 	  "Intel i82543GC 1000BASE-X Ethernet",
   1155 	  WM_T_82543,		WMP_F_FIBER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1158 	  "Intel i82543GC 1000BASE-T Ethernet",
   1159 	  WM_T_82543,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1162 	  "Intel i82544EI 1000BASE-T Ethernet",
   1163 	  WM_T_82544,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1166 	  "Intel i82544EI 1000BASE-X Ethernet",
   1167 	  WM_T_82544,		WMP_F_FIBER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1170 	  "Intel i82544GC 1000BASE-T Ethernet",
   1171 	  WM_T_82544,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1174 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1175 	  WM_T_82544,		WMP_F_COPPER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1178 	  "Intel i82540EM 1000BASE-T Ethernet",
   1179 	  WM_T_82540,		WMP_F_COPPER },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1182 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1183 	  WM_T_82540,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1186 	  "Intel i82540EP 1000BASE-T Ethernet",
   1187 	  WM_T_82540,		WMP_F_COPPER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1190 	  "Intel i82540EP 1000BASE-T Ethernet",
   1191 	  WM_T_82540,		WMP_F_COPPER },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1194 	  "Intel i82540EP 1000BASE-T Ethernet",
   1195 	  WM_T_82540,		WMP_F_COPPER },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1198 	  "Intel i82545EM 1000BASE-T Ethernet",
   1199 	  WM_T_82545,		WMP_F_COPPER },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1202 	  "Intel i82545GM 1000BASE-T Ethernet",
   1203 	  WM_T_82545_3,		WMP_F_COPPER },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1206 	  "Intel i82545GM 1000BASE-X Ethernet",
   1207 	  WM_T_82545_3,		WMP_F_FIBER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1210 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1211 	  WM_T_82545_3,		WMP_F_SERDES },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1214 	  "Intel i82546EB 1000BASE-T Ethernet",
   1215 	  WM_T_82546,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1218 	  "Intel i82546EB 1000BASE-T Ethernet",
   1219 	  WM_T_82546,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1222 	  "Intel i82545EM 1000BASE-X Ethernet",
   1223 	  WM_T_82545,		WMP_F_FIBER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1226 	  "Intel i82546EB 1000BASE-X Ethernet",
   1227 	  WM_T_82546,		WMP_F_FIBER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1230 	  "Intel i82546GB 1000BASE-T Ethernet",
   1231 	  WM_T_82546_3,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1234 	  "Intel i82546GB 1000BASE-X Ethernet",
   1235 	  WM_T_82546_3,		WMP_F_FIBER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1238 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1239 	  WM_T_82546_3,		WMP_F_SERDES },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1242 	  "i82546GB quad-port Gigabit Ethernet",
   1243 	  WM_T_82546_3,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1246 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1247 	  WM_T_82546_3,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1250 	  "Intel PRO/1000MT (82546GB)",
   1251 	  WM_T_82546_3,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1254 	  "Intel i82541EI 1000BASE-T Ethernet",
   1255 	  WM_T_82541,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1258 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1259 	  WM_T_82541,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1262 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1263 	  WM_T_82541,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1266 	  "Intel i82541ER 1000BASE-T Ethernet",
   1267 	  WM_T_82541_2,		WMP_F_COPPER },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1270 	  "Intel i82541GI 1000BASE-T Ethernet",
   1271 	  WM_T_82541_2,		WMP_F_COPPER },
   1272 
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1274 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1275 	  WM_T_82541_2,		WMP_F_COPPER },
   1276 
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1278 	  "Intel i82541PI 1000BASE-T Ethernet",
   1279 	  WM_T_82541_2,		WMP_F_COPPER },
   1280 
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1282 	  "Intel i82547EI 1000BASE-T Ethernet",
   1283 	  WM_T_82547,		WMP_F_COPPER },
   1284 
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1286 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1287 	  WM_T_82547,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1290 	  "Intel i82547GI 1000BASE-T Ethernet",
   1291 	  WM_T_82547_2,		WMP_F_COPPER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1294 	  "Intel PRO/1000 PT (82571EB)",
   1295 	  WM_T_82571,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1298 	  "Intel PRO/1000 PF (82571EB)",
   1299 	  WM_T_82571,		WMP_F_FIBER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1302 	  "Intel PRO/1000 PB (82571EB)",
   1303 	  WM_T_82571,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1306 	  "Intel PRO/1000 QT (82571EB)",
   1307 	  WM_T_82571,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1310 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1311 	  WM_T_82571,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1314 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1315 	  WM_T_82571,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1318 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1319 	  WM_T_82571,		WMP_F_SERDES },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1322 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1323 	  WM_T_82571,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1326 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1327 	  WM_T_82571,		WMP_F_FIBER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1330 	  "Intel i82572EI 1000baseT Ethernet",
   1331 	  WM_T_82572,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1334 	  "Intel i82572EI 1000baseX Ethernet",
   1335 	  WM_T_82572,		WMP_F_FIBER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1338 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1339 	  WM_T_82572,		WMP_F_SERDES },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1342 	  "Intel i82572EI 1000baseT Ethernet",
   1343 	  WM_T_82572,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1346 	  "Intel i82573E",
   1347 	  WM_T_82573,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1350 	  "Intel i82573E IAMT",
   1351 	  WM_T_82573,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1354 	  "Intel i82573L Gigabit Ethernet",
   1355 	  WM_T_82573,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1358 	  "Intel i82574L",
   1359 	  WM_T_82574,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1362 	  "Intel i82574L",
   1363 	  WM_T_82574,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1366 	  "Intel i82583V",
   1367 	  WM_T_82583,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1370 	  "i80003 dual 1000baseT Ethernet",
   1371 	  WM_T_80003,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1374 	  "i80003 dual 1000baseX Ethernet",
   1375 	  WM_T_80003,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1378 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1379 	  WM_T_80003,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1382 	  "Intel i80003 1000baseT Ethernet",
   1383 	  WM_T_80003,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1386 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1387 	  WM_T_80003,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1390 	  "Intel i82801H (M_AMT) LAN Controller",
   1391 	  WM_T_ICH8,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1393 	  "Intel i82801H (AMT) LAN Controller",
   1394 	  WM_T_ICH8,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1396 	  "Intel i82801H LAN Controller",
   1397 	  WM_T_ICH8,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1399 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1400 	  WM_T_ICH8,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1402 	  "Intel i82801H (M) LAN Controller",
   1403 	  WM_T_ICH8,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1405 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1406 	  WM_T_ICH8,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1408 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1409 	  WM_T_ICH8,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1411 	  "82567V-3 LAN Controller",
   1412 	  WM_T_ICH8,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1414 	  "82801I (AMT) LAN Controller",
   1415 	  WM_T_ICH9,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1417 	  "82801I 10/100 LAN Controller",
   1418 	  WM_T_ICH9,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1420 	  "82801I (G) 10/100 LAN Controller",
   1421 	  WM_T_ICH9,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1423 	  "82801I (GT) 10/100 LAN Controller",
   1424 	  WM_T_ICH9,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1426 	  "82801I (C) LAN Controller",
   1427 	  WM_T_ICH9,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1429 	  "82801I mobile LAN Controller",
   1430 	  WM_T_ICH9,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1432 	  "82801I mobile (V) LAN Controller",
   1433 	  WM_T_ICH9,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1435 	  "82801I mobile (AMT) LAN Controller",
   1436 	  WM_T_ICH9,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1438 	  "82567LM-4 LAN Controller",
   1439 	  WM_T_ICH9,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1441 	  "82567LM-2 LAN Controller",
   1442 	  WM_T_ICH10,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1444 	  "82567LF-2 LAN Controller",
   1445 	  WM_T_ICH10,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1447 	  "82567LM-3 LAN Controller",
   1448 	  WM_T_ICH10,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1450 	  "82567LF-3 LAN Controller",
   1451 	  WM_T_ICH10,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1453 	  "82567V-2 LAN Controller",
   1454 	  WM_T_ICH10,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1456 	  "82567V-3? LAN Controller",
   1457 	  WM_T_ICH10,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1459 	  "HANKSVILLE LAN Controller",
   1460 	  WM_T_ICH10,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1462 	  "PCH LAN (82577LM) Controller",
   1463 	  WM_T_PCH,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1465 	  "PCH LAN (82577LC) Controller",
   1466 	  WM_T_PCH,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1468 	  "PCH LAN (82578DM) Controller",
   1469 	  WM_T_PCH,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1471 	  "PCH LAN (82578DC) Controller",
   1472 	  WM_T_PCH,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1474 	  "PCH2 LAN (82579LM) Controller",
   1475 	  WM_T_PCH2,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1477 	  "PCH2 LAN (82579V) Controller",
   1478 	  WM_T_PCH2,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1480 	  "82575EB dual-1000baseT Ethernet",
   1481 	  WM_T_82575,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1483 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1484 	  WM_T_82575,		WMP_F_SERDES },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1486 	  "82575GB quad-1000baseT Ethernet",
   1487 	  WM_T_82575,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1489 	  "82575GB quad-1000baseT Ethernet (PM)",
   1490 	  WM_T_82575,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1492 	  "82576 1000BaseT Ethernet",
   1493 	  WM_T_82576,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1495 	  "82576 1000BaseX Ethernet",
   1496 	  WM_T_82576,		WMP_F_FIBER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1499 	  "82576 gigabit Ethernet (SERDES)",
   1500 	  WM_T_82576,		WMP_F_SERDES },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1503 	  "82576 quad-1000BaseT Ethernet",
   1504 	  WM_T_82576,		WMP_F_COPPER },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1507 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1508 	  WM_T_82576,		WMP_F_COPPER },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1511 	  "82576 gigabit Ethernet",
   1512 	  WM_T_82576,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1515 	  "82576 gigabit Ethernet (SERDES)",
   1516 	  WM_T_82576,		WMP_F_SERDES },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1518 	  "82576 quad-gigabit Ethernet (SERDES)",
   1519 	  WM_T_82576,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1522 	  "82580 1000BaseT Ethernet",
   1523 	  WM_T_82580,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1525 	  "82580 1000BaseX Ethernet",
   1526 	  WM_T_82580,		WMP_F_FIBER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1529 	  "82580 1000BaseT Ethernet (SERDES)",
   1530 	  WM_T_82580,		WMP_F_SERDES },
   1531 
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1533 	  "82580 gigabit Ethernet (SGMII)",
   1534 	  WM_T_82580,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1536 	  "82580 dual-1000BaseT Ethernet",
   1537 	  WM_T_82580,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1540 	  "82580 quad-1000BaseX Ethernet",
   1541 	  WM_T_82580,		WMP_F_FIBER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1544 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1545 	  WM_T_82580,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1548 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1549 	  WM_T_82580,		WMP_F_SERDES },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1552 	  "DH89XXCC 1000BASE-KX Ethernet",
   1553 	  WM_T_82580,		WMP_F_SERDES },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1556 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1557 	  WM_T_82580,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1560 	  "I350 Gigabit Network Connection",
   1561 	  WM_T_I350,		WMP_F_COPPER },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1564 	  "I350 Gigabit Fiber Network Connection",
   1565 	  WM_T_I350,		WMP_F_FIBER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1568 	  "I350 Gigabit Backplane Connection",
   1569 	  WM_T_I350,		WMP_F_SERDES },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1572 	  "I350 Quad Port Gigabit Ethernet",
   1573 	  WM_T_I350,		WMP_F_SERDES },
   1574 
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1576 	  "I350 Gigabit Connection",
   1577 	  WM_T_I350,		WMP_F_COPPER },
   1578 
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1580 	  "I354 Gigabit Ethernet (KX)",
   1581 	  WM_T_I354,		WMP_F_SERDES },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1584 	  "I354 Gigabit Ethernet (SGMII)",
   1585 	  WM_T_I354,		WMP_F_COPPER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1588 	  "I354 Gigabit Ethernet (2.5G)",
   1589 	  WM_T_I354,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1592 	  "I210-T1 Ethernet Server Adapter",
   1593 	  WM_T_I210,		WMP_F_COPPER },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1596 	  "I210 Ethernet (Copper OEM)",
   1597 	  WM_T_I210,		WMP_F_COPPER },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1600 	  "I210 Ethernet (Copper IT)",
   1601 	  WM_T_I210,		WMP_F_COPPER },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1604 	  "I210 Ethernet (Copper, FLASH less)",
   1605 	  WM_T_I210,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1608 	  "I210 Gigabit Ethernet (Fiber)",
   1609 	  WM_T_I210,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1612 	  "I210 Gigabit Ethernet (SERDES)",
   1613 	  WM_T_I210,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1616 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1617 	  WM_T_I210,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1620 	  "I210 Gigabit Ethernet (SGMII)",
   1621 	  WM_T_I210,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1624 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1625 	  WM_T_I210,		WMP_F_COPPER },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1628 	  "I211 Ethernet (COPPER)",
   1629 	  WM_T_I211,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1631 	  "I217 V Ethernet Connection",
   1632 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1634 	  "I217 LM Ethernet Connection",
   1635 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1637 	  "I218 V Ethernet Connection",
   1638 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1640 	  "I218 V Ethernet Connection",
   1641 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1643 	  "I218 V Ethernet Connection",
   1644 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1646 	  "I218 LM Ethernet Connection",
   1647 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1649 	  "I218 LM Ethernet Connection",
   1650 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1652 	  "I218 LM Ethernet Connection",
   1653 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1655 	  "I219 LM Ethernet Connection",
   1656 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1658 	  "I219 LM (2) Ethernet Connection",
   1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1661 	  "I219 LM (3) Ethernet Connection",
   1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1664 	  "I219 LM (4) Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1667 	  "I219 LM (5) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1670 	  "I219 LM (6) Ethernet Connection",
   1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1673 	  "I219 LM (7) Ethernet Connection",
   1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1676 	  "I219 LM (8) Ethernet Connection",
   1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1679 	  "I219 LM (9) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1682 	  "I219 LM (10) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1685 	  "I219 LM (11) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1688 	  "I219 LM (12) Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1691 	  "I219 LM (13) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1694 	  "I219 LM (14) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1697 	  "I219 LM (15) Ethernet Connection",
   1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1700 	  "I219 LM (16) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1703 	  "I219 LM (17) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1706 	  "I219 LM (18) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1709 	  "I219 LM (19) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1712 	  "I219 V Ethernet Connection",
   1713 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1715 	  "I219 V (2) Ethernet Connection",
   1716 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1718 	  "I219 V (4) Ethernet Connection",
   1719 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1721 	  "I219 V (5) Ethernet Connection",
   1722 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1723 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1724 	  "I219 V (6) Ethernet Connection",
   1725 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1727 	  "I219 V (7) Ethernet Connection",
   1728 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1730 	  "I219 V (8) Ethernet Connection",
   1731 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1733 	  "I219 V (9) Ethernet Connection",
   1734 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1735 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1736 	  "I219 V (10) Ethernet Connection",
   1737 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1739 	  "I219 V (11) Ethernet Connection",
   1740 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1742 	  "I219 V (12) Ethernet Connection",
   1743 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1745 	  "I219 V (13) Ethernet Connection",
   1746 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1747 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1748 	  "I219 V (14) Ethernet Connection",
   1749 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1751 	  "I219 V (15) Ethernet Connection",
   1752 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1754 	  "I219 V (16) Ethernet Connection",
   1755 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1757 	  "I219 V (17) Ethernet Connection",
   1758 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1759 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1760 	  "I219 V (18) Ethernet Connection",
   1761 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1763 	  "I219 V (19) Ethernet Connection",
   1764 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1765 	{ 0,			0,
   1766 	  NULL,
   1767 	  0,			0 },
   1768 };
   1769 
   1770 /*
   1771  * Register read/write functions.
   1772  * Other than CSR_{READ|WRITE}().
   1773  */
   1774 
   1775 #if 0 /* Not currently used */
   1776 static inline uint32_t
   1777 wm_io_read(struct wm_softc *sc, int reg)
   1778 {
   1779 
   1780 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1781 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1782 }
   1783 #endif
   1784 
   1785 static inline void
   1786 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1787 {
   1788 
   1789 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1791 }
   1792 
   1793 static inline void
   1794 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1795     uint32_t data)
   1796 {
   1797 	uint32_t regval;
   1798 	int i;
   1799 
   1800 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1801 
   1802 	CSR_WRITE(sc, reg, regval);
   1803 
   1804 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1805 		delay(5);
   1806 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1807 			break;
   1808 	}
   1809 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1810 		aprint_error("%s: WARNING:"
   1811 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1812 		    device_xname(sc->sc_dev), reg);
   1813 	}
   1814 }
   1815 
   1816 static inline void
   1817 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1818 {
   1819 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1820 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1821 }
   1822 
   1823 /*
   1824  * Descriptor sync/init functions.
   1825  */
   1826 static inline void
   1827 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1828 {
   1829 	struct wm_softc *sc = txq->txq_sc;
   1830 
   1831 	/* If it will wrap around, sync to the end of the ring. */
   1832 	if ((start + num) > WM_NTXDESC(txq)) {
   1833 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1834 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1835 		    (WM_NTXDESC(txq) - start), ops);
   1836 		num -= (WM_NTXDESC(txq) - start);
   1837 		start = 0;
   1838 	}
   1839 
   1840 	/* Now sync whatever is left. */
   1841 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1842 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1843 }
   1844 
   1845 static inline void
   1846 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1847 {
   1848 	struct wm_softc *sc = rxq->rxq_sc;
   1849 
   1850 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1851 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1852 }
   1853 
   1854 static inline void
   1855 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1856 {
   1857 	struct wm_softc *sc = rxq->rxq_sc;
   1858 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1859 	struct mbuf *m = rxs->rxs_mbuf;
   1860 
   1861 	/*
   1862 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1863 	 * so that the payload after the Ethernet header is aligned
   1864 	 * to a 4-byte boundary.
   1865 
   1866 	 * XXX BRAINDAMAGE ALERT!
   1867 	 * The stupid chip uses the same size for every buffer, which
   1868 	 * is set in the Receive Control register.  We are using the 2K
   1869 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1870 	 * reason, we can't "scoot" packets longer than the standard
   1871 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1872 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1873 	 * the upper layer copy the headers.
   1874 	 */
   1875 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1876 
   1877 	if (sc->sc_type == WM_T_82574) {
   1878 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1879 		rxd->erx_data.erxd_addr =
   1880 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1881 		rxd->erx_data.erxd_dd = 0;
   1882 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1883 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1884 
   1885 		rxd->nqrx_data.nrxd_paddr =
   1886 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1887 		/* Currently, split header is not supported. */
   1888 		rxd->nqrx_data.nrxd_haddr = 0;
   1889 	} else {
   1890 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1891 
   1892 		wm_set_dma_addr(&rxd->wrx_addr,
   1893 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1894 		rxd->wrx_len = 0;
   1895 		rxd->wrx_cksum = 0;
   1896 		rxd->wrx_status = 0;
   1897 		rxd->wrx_errors = 0;
   1898 		rxd->wrx_special = 0;
   1899 	}
   1900 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1901 
   1902 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1903 }
   1904 
   1905 /*
   1906  * Device driver interface functions and commonly used functions.
   1907  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1908  */
   1909 
   1910 /* Lookup supported device table */
   1911 static const struct wm_product *
   1912 wm_lookup(const struct pci_attach_args *pa)
   1913 {
   1914 	const struct wm_product *wmp;
   1915 
   1916 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1917 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1918 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1919 			return wmp;
   1920 	}
   1921 	return NULL;
   1922 }
   1923 
   1924 /* The match function (ca_match) */
   1925 static int
   1926 wm_match(device_t parent, cfdata_t cf, void *aux)
   1927 {
   1928 	struct pci_attach_args *pa = aux;
   1929 
   1930 	if (wm_lookup(pa) != NULL)
   1931 		return 1;
   1932 
   1933 	return 0;
   1934 }
   1935 
   1936 /* The attach function (ca_attach) */
   1937 static void
   1938 wm_attach(device_t parent, device_t self, void *aux)
   1939 {
   1940 	struct wm_softc *sc = device_private(self);
   1941 	struct pci_attach_args *pa = aux;
   1942 	prop_dictionary_t dict;
   1943 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1944 	pci_chipset_tag_t pc = pa->pa_pc;
   1945 	int counts[PCI_INTR_TYPE_SIZE];
   1946 	pci_intr_type_t max_type;
   1947 	const char *eetype, *xname;
   1948 	bus_space_tag_t memt;
   1949 	bus_space_handle_t memh;
   1950 	bus_size_t memsize;
   1951 	int memh_valid;
   1952 	int i, error;
   1953 	const struct wm_product *wmp;
   1954 	prop_data_t ea;
   1955 	prop_number_t pn;
   1956 	uint8_t enaddr[ETHER_ADDR_LEN];
   1957 	char buf[256];
   1958 	char wqname[MAXCOMLEN];
   1959 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1960 	pcireg_t preg, memtype;
   1961 	uint16_t eeprom_data, apme_mask;
   1962 	bool force_clear_smbi;
   1963 	uint32_t link_mode;
   1964 	uint32_t reg;
   1965 
   1966 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1967 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1968 #endif
   1969 	sc->sc_dev = self;
   1970 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   1971 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1972 	sc->sc_core_stopping = false;
   1973 
   1974 	wmp = wm_lookup(pa);
   1975 #ifdef DIAGNOSTIC
   1976 	if (wmp == NULL) {
   1977 		printf("\n");
   1978 		panic("wm_attach: impossible");
   1979 	}
   1980 #endif
   1981 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1982 
   1983 	sc->sc_pc = pa->pa_pc;
   1984 	sc->sc_pcitag = pa->pa_tag;
   1985 
   1986 	if (pci_dma64_available(pa)) {
   1987 		aprint_verbose(", 64-bit DMA");
   1988 		sc->sc_dmat = pa->pa_dmat64;
   1989 	} else {
   1990 		aprint_verbose(", 32-bit DMA");
   1991 		sc->sc_dmat = pa->pa_dmat;
   1992 	}
   1993 
   1994 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1995 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1996 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1997 
   1998 	sc->sc_type = wmp->wmp_type;
   1999 
   2000 	/* Set default function pointers */
   2001 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2002 	sc->phy.release = sc->nvm.release = wm_put_null;
   2003 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2004 
   2005 	if (sc->sc_type < WM_T_82543) {
   2006 		if (sc->sc_rev < 2) {
   2007 			aprint_error_dev(sc->sc_dev,
   2008 			    "i82542 must be at least rev. 2\n");
   2009 			return;
   2010 		}
   2011 		if (sc->sc_rev < 3)
   2012 			sc->sc_type = WM_T_82542_2_0;
   2013 	}
   2014 
   2015 	/*
   2016 	 * Disable MSI for Errata:
   2017 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2018 	 *
   2019 	 *  82544: Errata 25
   2020 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2021 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2022 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2023 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2024 	 *
   2025 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2026 	 *
   2027 	 *  82571 & 82572: Errata 63
   2028 	 */
   2029 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2030 	    || (sc->sc_type == WM_T_82572))
   2031 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2032 
   2033 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2034 	    || (sc->sc_type == WM_T_82580)
   2035 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2036 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2037 		sc->sc_flags |= WM_F_NEWQUEUE;
   2038 
   2039 	/* Set device properties (mactype) */
   2040 	dict = device_properties(sc->sc_dev);
   2041 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2042 
   2043 	/*
   2044 	 * Map the device.  All devices support memory-mapped acccess,
   2045 	 * and it is really required for normal operation.
   2046 	 */
   2047 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2048 	switch (memtype) {
   2049 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2051 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2052 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2053 		break;
   2054 	default:
   2055 		memh_valid = 0;
   2056 		break;
   2057 	}
   2058 
   2059 	if (memh_valid) {
   2060 		sc->sc_st = memt;
   2061 		sc->sc_sh = memh;
   2062 		sc->sc_ss = memsize;
   2063 	} else {
   2064 		aprint_error_dev(sc->sc_dev,
   2065 		    "unable to map device registers\n");
   2066 		return;
   2067 	}
   2068 
   2069 	/*
   2070 	 * In addition, i82544 and later support I/O mapped indirect
   2071 	 * register access.  It is not desirable (nor supported in
   2072 	 * this driver) to use it for normal operation, though it is
   2073 	 * required to work around bugs in some chip versions.
   2074 	 */
   2075 	switch (sc->sc_type) {
   2076 	case WM_T_82544:
   2077 	case WM_T_82541:
   2078 	case WM_T_82541_2:
   2079 	case WM_T_82547:
   2080 	case WM_T_82547_2:
   2081 		/* First we have to find the I/O BAR. */
   2082 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2083 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2084 			if (memtype == PCI_MAPREG_TYPE_IO)
   2085 				break;
   2086 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2087 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2088 				i += 4;	/* skip high bits, too */
   2089 		}
   2090 		if (i < PCI_MAPREG_END) {
   2091 			/*
   2092 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   2093 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   2094 			 * It's no problem because newer chips has no this
   2095 			 * bug.
   2096 			 *
   2097 			 * The i8254x doesn't apparently respond when the
   2098 			 * I/O BAR is 0, which looks somewhat like it's not
   2099 			 * been configured.
   2100 			 */
   2101 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2102 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2103 				aprint_error_dev(sc->sc_dev,
   2104 				    "WARNING: I/O BAR at zero.\n");
   2105 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2106 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2107 			    == 0) {
   2108 				sc->sc_flags |= WM_F_IOH_VALID;
   2109 			} else
   2110 				aprint_error_dev(sc->sc_dev,
   2111 				    "WARNING: unable to map I/O space\n");
   2112 		}
   2113 		break;
   2114 	default:
   2115 		break;
   2116 	}
   2117 
   2118 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2119 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2120 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2121 	if (sc->sc_type < WM_T_82542_2_1)
   2122 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2123 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2124 
   2125 	/* Power up chip */
   2126 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2127 	    && error != EOPNOTSUPP) {
   2128 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2129 		return;
   2130 	}
   2131 
   2132 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2133 	/*
   2134 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   2135 	 * resource.
   2136 	 */
   2137 	if (sc->sc_nqueues > 1) {
   2138 		max_type = PCI_INTR_TYPE_MSIX;
   2139 		/*
   2140 		 *  82583 has a MSI-X capability in the PCI configuration space
   2141 		 * but it doesn't support it. At least the document doesn't
   2142 		 * say anything about MSI-X.
   2143 		 */
   2144 		counts[PCI_INTR_TYPE_MSIX]
   2145 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2146 	} else {
   2147 		max_type = PCI_INTR_TYPE_MSI;
   2148 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2149 	}
   2150 
   2151 	/* Allocation settings */
   2152 	counts[PCI_INTR_TYPE_MSI] = 1;
   2153 	counts[PCI_INTR_TYPE_INTX] = 1;
   2154 	/* overridden by disable flags */
   2155 	if (wm_disable_msi != 0) {
   2156 		counts[PCI_INTR_TYPE_MSI] = 0;
   2157 		if (wm_disable_msix != 0) {
   2158 			max_type = PCI_INTR_TYPE_INTX;
   2159 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2160 		}
   2161 	} else if (wm_disable_msix != 0) {
   2162 		max_type = PCI_INTR_TYPE_MSI;
   2163 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2164 	}
   2165 
   2166 alloc_retry:
   2167 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2168 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2169 		return;
   2170 	}
   2171 
   2172 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2173 		error = wm_setup_msix(sc);
   2174 		if (error) {
   2175 			pci_intr_release(pc, sc->sc_intrs,
   2176 			    counts[PCI_INTR_TYPE_MSIX]);
   2177 
   2178 			/* Setup for MSI: Disable MSI-X */
   2179 			max_type = PCI_INTR_TYPE_MSI;
   2180 			counts[PCI_INTR_TYPE_MSI] = 1;
   2181 			counts[PCI_INTR_TYPE_INTX] = 1;
   2182 			goto alloc_retry;
   2183 		}
   2184 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2185 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2186 		error = wm_setup_legacy(sc);
   2187 		if (error) {
   2188 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2189 			    counts[PCI_INTR_TYPE_MSI]);
   2190 
   2191 			/* The next try is for INTx: Disable MSI */
   2192 			max_type = PCI_INTR_TYPE_INTX;
   2193 			counts[PCI_INTR_TYPE_INTX] = 1;
   2194 			goto alloc_retry;
   2195 		}
   2196 	} else {
   2197 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2198 		error = wm_setup_legacy(sc);
   2199 		if (error) {
   2200 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2201 			    counts[PCI_INTR_TYPE_INTX]);
   2202 			return;
   2203 		}
   2204 	}
   2205 
   2206 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2207 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2208 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2209 	    WQ_PERCPU | WQ_MPSAFE);
   2210 	if (error) {
   2211 		aprint_error_dev(sc->sc_dev,
   2212 		    "unable to create TxRx workqueue\n");
   2213 		goto out;
   2214 	}
   2215 
   2216 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2217 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2218 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2219 	    WQ_MPSAFE);
   2220 	if (error) {
   2221 		workqueue_destroy(sc->sc_queue_wq);
   2222 		aprint_error_dev(sc->sc_dev,
   2223 		    "unable to create reset workqueue\n");
   2224 		goto out;
   2225 	}
   2226 
   2227 	/*
   2228 	 * Check the function ID (unit number of the chip).
   2229 	 */
   2230 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2231 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2232 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2233 	    || (sc->sc_type == WM_T_82580)
   2234 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2235 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2236 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2237 	else
   2238 		sc->sc_funcid = 0;
   2239 
   2240 	/*
   2241 	 * Determine a few things about the bus we're connected to.
   2242 	 */
   2243 	if (sc->sc_type < WM_T_82543) {
   2244 		/* We don't really know the bus characteristics here. */
   2245 		sc->sc_bus_speed = 33;
   2246 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2247 		/*
   2248 		 * CSA (Communication Streaming Architecture) is about as fast
   2249 		 * a 32-bit 66MHz PCI Bus.
   2250 		 */
   2251 		sc->sc_flags |= WM_F_CSA;
   2252 		sc->sc_bus_speed = 66;
   2253 		aprint_verbose_dev(sc->sc_dev,
   2254 		    "Communication Streaming Architecture\n");
   2255 		if (sc->sc_type == WM_T_82547) {
   2256 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2257 			callout_setfunc(&sc->sc_txfifo_ch,
   2258 			    wm_82547_txfifo_stall, sc);
   2259 			aprint_verbose_dev(sc->sc_dev,
   2260 			    "using 82547 Tx FIFO stall work-around\n");
   2261 		}
   2262 	} else if (sc->sc_type >= WM_T_82571) {
   2263 		sc->sc_flags |= WM_F_PCIE;
   2264 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2265 		    && (sc->sc_type != WM_T_ICH10)
   2266 		    && (sc->sc_type != WM_T_PCH)
   2267 		    && (sc->sc_type != WM_T_PCH2)
   2268 		    && (sc->sc_type != WM_T_PCH_LPT)
   2269 		    && (sc->sc_type != WM_T_PCH_SPT)
   2270 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2271 			/* ICH* and PCH* have no PCIe capability registers */
   2272 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2273 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2274 				NULL) == 0)
   2275 				aprint_error_dev(sc->sc_dev,
   2276 				    "unable to find PCIe capability\n");
   2277 		}
   2278 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2279 	} else {
   2280 		reg = CSR_READ(sc, WMREG_STATUS);
   2281 		if (reg & STATUS_BUS64)
   2282 			sc->sc_flags |= WM_F_BUS64;
   2283 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2284 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2285 
   2286 			sc->sc_flags |= WM_F_PCIX;
   2287 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2288 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2289 				aprint_error_dev(sc->sc_dev,
   2290 				    "unable to find PCIX capability\n");
   2291 			else if (sc->sc_type != WM_T_82545_3 &&
   2292 			    sc->sc_type != WM_T_82546_3) {
   2293 				/*
   2294 				 * Work around a problem caused by the BIOS
   2295 				 * setting the max memory read byte count
   2296 				 * incorrectly.
   2297 				 */
   2298 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2299 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2300 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2301 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2302 
   2303 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2304 				    PCIX_CMD_BYTECNT_SHIFT;
   2305 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2306 				    PCIX_STATUS_MAXB_SHIFT;
   2307 				if (bytecnt > maxb) {
   2308 					aprint_verbose_dev(sc->sc_dev,
   2309 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2310 					    512 << bytecnt, 512 << maxb);
   2311 					pcix_cmd = (pcix_cmd &
   2312 					    ~PCIX_CMD_BYTECNT_MASK) |
   2313 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2314 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2315 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2316 					    pcix_cmd);
   2317 				}
   2318 			}
   2319 		}
   2320 		/*
   2321 		 * The quad port adapter is special; it has a PCIX-PCIX
   2322 		 * bridge on the board, and can run the secondary bus at
   2323 		 * a higher speed.
   2324 		 */
   2325 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2326 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2327 								      : 66;
   2328 		} else if (sc->sc_flags & WM_F_PCIX) {
   2329 			switch (reg & STATUS_PCIXSPD_MASK) {
   2330 			case STATUS_PCIXSPD_50_66:
   2331 				sc->sc_bus_speed = 66;
   2332 				break;
   2333 			case STATUS_PCIXSPD_66_100:
   2334 				sc->sc_bus_speed = 100;
   2335 				break;
   2336 			case STATUS_PCIXSPD_100_133:
   2337 				sc->sc_bus_speed = 133;
   2338 				break;
   2339 			default:
   2340 				aprint_error_dev(sc->sc_dev,
   2341 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2342 				    reg & STATUS_PCIXSPD_MASK);
   2343 				sc->sc_bus_speed = 66;
   2344 				break;
   2345 			}
   2346 		} else
   2347 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2348 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2349 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2350 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2351 	}
   2352 
   2353 	/* clear interesting stat counters */
   2354 	CSR_READ(sc, WMREG_COLC);
   2355 	CSR_READ(sc, WMREG_RXERRC);
   2356 
   2357 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2358 	    || (sc->sc_type >= WM_T_ICH8))
   2359 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2360 	if (sc->sc_type >= WM_T_ICH8)
   2361 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2362 
   2363 	/* Set PHY, NVM mutex related stuff */
   2364 	switch (sc->sc_type) {
   2365 	case WM_T_82542_2_0:
   2366 	case WM_T_82542_2_1:
   2367 	case WM_T_82543:
   2368 	case WM_T_82544:
   2369 		/* Microwire */
   2370 		sc->nvm.read = wm_nvm_read_uwire;
   2371 		sc->sc_nvm_wordsize = 64;
   2372 		sc->sc_nvm_addrbits = 6;
   2373 		break;
   2374 	case WM_T_82540:
   2375 	case WM_T_82545:
   2376 	case WM_T_82545_3:
   2377 	case WM_T_82546:
   2378 	case WM_T_82546_3:
   2379 		/* Microwire */
   2380 		sc->nvm.read = wm_nvm_read_uwire;
   2381 		reg = CSR_READ(sc, WMREG_EECD);
   2382 		if (reg & EECD_EE_SIZE) {
   2383 			sc->sc_nvm_wordsize = 256;
   2384 			sc->sc_nvm_addrbits = 8;
   2385 		} else {
   2386 			sc->sc_nvm_wordsize = 64;
   2387 			sc->sc_nvm_addrbits = 6;
   2388 		}
   2389 		sc->sc_flags |= WM_F_LOCK_EECD;
   2390 		sc->nvm.acquire = wm_get_eecd;
   2391 		sc->nvm.release = wm_put_eecd;
   2392 		break;
   2393 	case WM_T_82541:
   2394 	case WM_T_82541_2:
   2395 	case WM_T_82547:
   2396 	case WM_T_82547_2:
   2397 		reg = CSR_READ(sc, WMREG_EECD);
   2398 		/*
   2399 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2400 		 * on 8254[17], so set flags and functios before calling it.
   2401 		 */
   2402 		sc->sc_flags |= WM_F_LOCK_EECD;
   2403 		sc->nvm.acquire = wm_get_eecd;
   2404 		sc->nvm.release = wm_put_eecd;
   2405 		if (reg & EECD_EE_TYPE) {
   2406 			/* SPI */
   2407 			sc->nvm.read = wm_nvm_read_spi;
   2408 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2409 			wm_nvm_set_addrbits_size_eecd(sc);
   2410 		} else {
   2411 			/* Microwire */
   2412 			sc->nvm.read = wm_nvm_read_uwire;
   2413 			if ((reg & EECD_EE_ABITS) != 0) {
   2414 				sc->sc_nvm_wordsize = 256;
   2415 				sc->sc_nvm_addrbits = 8;
   2416 			} else {
   2417 				sc->sc_nvm_wordsize = 64;
   2418 				sc->sc_nvm_addrbits = 6;
   2419 			}
   2420 		}
   2421 		break;
   2422 	case WM_T_82571:
   2423 	case WM_T_82572:
   2424 		/* SPI */
   2425 		sc->nvm.read = wm_nvm_read_eerd;
   2426 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2427 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2428 		wm_nvm_set_addrbits_size_eecd(sc);
   2429 		sc->phy.acquire = wm_get_swsm_semaphore;
   2430 		sc->phy.release = wm_put_swsm_semaphore;
   2431 		sc->nvm.acquire = wm_get_nvm_82571;
   2432 		sc->nvm.release = wm_put_nvm_82571;
   2433 		break;
   2434 	case WM_T_82573:
   2435 	case WM_T_82574:
   2436 	case WM_T_82583:
   2437 		sc->nvm.read = wm_nvm_read_eerd;
   2438 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2439 		if (sc->sc_type == WM_T_82573) {
   2440 			sc->phy.acquire = wm_get_swsm_semaphore;
   2441 			sc->phy.release = wm_put_swsm_semaphore;
   2442 			sc->nvm.acquire = wm_get_nvm_82571;
   2443 			sc->nvm.release = wm_put_nvm_82571;
   2444 		} else {
   2445 			/* Both PHY and NVM use the same semaphore. */
   2446 			sc->phy.acquire = sc->nvm.acquire
   2447 			    = wm_get_swfwhw_semaphore;
   2448 			sc->phy.release = sc->nvm.release
   2449 			    = wm_put_swfwhw_semaphore;
   2450 		}
   2451 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2452 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2453 			sc->sc_nvm_wordsize = 2048;
   2454 		} else {
   2455 			/* SPI */
   2456 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2457 			wm_nvm_set_addrbits_size_eecd(sc);
   2458 		}
   2459 		break;
   2460 	case WM_T_82575:
   2461 	case WM_T_82576:
   2462 	case WM_T_82580:
   2463 	case WM_T_I350:
   2464 	case WM_T_I354:
   2465 	case WM_T_80003:
   2466 		/* SPI */
   2467 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2468 		wm_nvm_set_addrbits_size_eecd(sc);
   2469 		if ((sc->sc_type == WM_T_80003)
   2470 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2471 			sc->nvm.read = wm_nvm_read_eerd;
   2472 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2473 		} else {
   2474 			sc->nvm.read = wm_nvm_read_spi;
   2475 			sc->sc_flags |= WM_F_LOCK_EECD;
   2476 		}
   2477 		sc->phy.acquire = wm_get_phy_82575;
   2478 		sc->phy.release = wm_put_phy_82575;
   2479 		sc->nvm.acquire = wm_get_nvm_80003;
   2480 		sc->nvm.release = wm_put_nvm_80003;
   2481 		break;
   2482 	case WM_T_ICH8:
   2483 	case WM_T_ICH9:
   2484 	case WM_T_ICH10:
   2485 	case WM_T_PCH:
   2486 	case WM_T_PCH2:
   2487 	case WM_T_PCH_LPT:
   2488 		sc->nvm.read = wm_nvm_read_ich8;
   2489 		/* FLASH */
   2490 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2491 		sc->sc_nvm_wordsize = 2048;
   2492 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2493 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2494 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2495 			aprint_error_dev(sc->sc_dev,
   2496 			    "can't map FLASH registers\n");
   2497 			goto out;
   2498 		}
   2499 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2500 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2501 		    ICH_FLASH_SECTOR_SIZE;
   2502 		sc->sc_ich8_flash_bank_size =
   2503 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2504 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2505 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2506 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2507 		sc->sc_flashreg_offset = 0;
   2508 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2509 		sc->phy.release = wm_put_swflag_ich8lan;
   2510 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2511 		sc->nvm.release = wm_put_nvm_ich8lan;
   2512 		break;
   2513 	case WM_T_PCH_SPT:
   2514 	case WM_T_PCH_CNP:
   2515 		sc->nvm.read = wm_nvm_read_spt;
   2516 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2517 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2518 		sc->sc_flasht = sc->sc_st;
   2519 		sc->sc_flashh = sc->sc_sh;
   2520 		sc->sc_ich8_flash_base = 0;
   2521 		sc->sc_nvm_wordsize =
   2522 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2523 		    * NVM_SIZE_MULTIPLIER;
   2524 		/* It is size in bytes, we want words */
   2525 		sc->sc_nvm_wordsize /= 2;
   2526 		/* Assume 2 banks */
   2527 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2528 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2529 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2530 		sc->phy.release = wm_put_swflag_ich8lan;
   2531 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2532 		sc->nvm.release = wm_put_nvm_ich8lan;
   2533 		break;
   2534 	case WM_T_I210:
   2535 	case WM_T_I211:
   2536 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2537 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2538 		if (wm_nvm_flash_presence_i210(sc)) {
   2539 			sc->nvm.read = wm_nvm_read_eerd;
   2540 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2541 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2542 			wm_nvm_set_addrbits_size_eecd(sc);
   2543 		} else {
   2544 			sc->nvm.read = wm_nvm_read_invm;
   2545 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2546 			sc->sc_nvm_wordsize = INVM_SIZE;
   2547 		}
   2548 		sc->phy.acquire = wm_get_phy_82575;
   2549 		sc->phy.release = wm_put_phy_82575;
   2550 		sc->nvm.acquire = wm_get_nvm_80003;
   2551 		sc->nvm.release = wm_put_nvm_80003;
   2552 		break;
   2553 	default:
   2554 		break;
   2555 	}
   2556 
   2557 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2558 	switch (sc->sc_type) {
   2559 	case WM_T_82571:
   2560 	case WM_T_82572:
   2561 		reg = CSR_READ(sc, WMREG_SWSM2);
   2562 		if ((reg & SWSM2_LOCK) == 0) {
   2563 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2564 			force_clear_smbi = true;
   2565 		} else
   2566 			force_clear_smbi = false;
   2567 		break;
   2568 	case WM_T_82573:
   2569 	case WM_T_82574:
   2570 	case WM_T_82583:
   2571 		force_clear_smbi = true;
   2572 		break;
   2573 	default:
   2574 		force_clear_smbi = false;
   2575 		break;
   2576 	}
   2577 	if (force_clear_smbi) {
   2578 		reg = CSR_READ(sc, WMREG_SWSM);
   2579 		if ((reg & SWSM_SMBI) != 0)
   2580 			aprint_error_dev(sc->sc_dev,
   2581 			    "Please update the Bootagent\n");
   2582 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2583 	}
   2584 
   2585 	/*
   2586 	 * Defer printing the EEPROM type until after verifying the checksum
   2587 	 * This allows the EEPROM type to be printed correctly in the case
   2588 	 * that no EEPROM is attached.
   2589 	 */
   2590 	/*
   2591 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2592 	 * this for later, so we can fail future reads from the EEPROM.
   2593 	 */
   2594 	if (wm_nvm_validate_checksum(sc)) {
   2595 		/*
   2596 		 * Read twice again because some PCI-e parts fail the
   2597 		 * first check due to the link being in sleep state.
   2598 		 */
   2599 		if (wm_nvm_validate_checksum(sc))
   2600 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2601 	}
   2602 
   2603 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2604 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2605 	else {
   2606 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2607 		    sc->sc_nvm_wordsize);
   2608 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2609 			aprint_verbose("iNVM");
   2610 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2611 			aprint_verbose("FLASH(HW)");
   2612 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2613 			aprint_verbose("FLASH");
   2614 		else {
   2615 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2616 				eetype = "SPI";
   2617 			else
   2618 				eetype = "MicroWire";
   2619 			aprint_verbose("(%d address bits) %s EEPROM",
   2620 			    sc->sc_nvm_addrbits, eetype);
   2621 		}
   2622 	}
   2623 	wm_nvm_version(sc);
   2624 	aprint_verbose("\n");
   2625 
   2626 	/*
   2627 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2628 	 * incorrect.
   2629 	 */
   2630 	wm_gmii_setup_phytype(sc, 0, 0);
   2631 
   2632 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2633 	switch (sc->sc_type) {
   2634 	case WM_T_ICH8:
   2635 	case WM_T_ICH9:
   2636 	case WM_T_ICH10:
   2637 	case WM_T_PCH:
   2638 	case WM_T_PCH2:
   2639 	case WM_T_PCH_LPT:
   2640 	case WM_T_PCH_SPT:
   2641 	case WM_T_PCH_CNP:
   2642 		apme_mask = WUC_APME;
   2643 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2644 		if ((eeprom_data & apme_mask) != 0)
   2645 			sc->sc_flags |= WM_F_WOL;
   2646 		break;
   2647 	default:
   2648 		break;
   2649 	}
   2650 
   2651 	/* Reset the chip to a known state. */
   2652 	wm_reset(sc);
   2653 
   2654 	/*
   2655 	 * Check for I21[01] PLL workaround.
   2656 	 *
   2657 	 * Three cases:
   2658 	 * a) Chip is I211.
   2659 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2660 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2661 	 */
   2662 	if (sc->sc_type == WM_T_I211)
   2663 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2664 	if (sc->sc_type == WM_T_I210) {
   2665 		if (!wm_nvm_flash_presence_i210(sc))
   2666 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2667 		else if ((sc->sc_nvm_ver_major < 3)
   2668 		    || ((sc->sc_nvm_ver_major == 3)
   2669 			&& (sc->sc_nvm_ver_minor < 25))) {
   2670 			aprint_verbose_dev(sc->sc_dev,
   2671 			    "ROM image version %d.%d is older than 3.25\n",
   2672 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2673 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2674 		}
   2675 	}
   2676 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2677 		wm_pll_workaround_i210(sc);
   2678 
   2679 	wm_get_wakeup(sc);
   2680 
   2681 	/* Non-AMT based hardware can now take control from firmware */
   2682 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2683 		wm_get_hw_control(sc);
   2684 
   2685 	/*
   2686 	 * Read the Ethernet address from the EEPROM, if not first found
   2687 	 * in device properties.
   2688 	 */
   2689 	ea = prop_dictionary_get(dict, "mac-address");
   2690 	if (ea != NULL) {
   2691 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2692 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2693 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2694 	} else {
   2695 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2696 			aprint_error_dev(sc->sc_dev,
   2697 			    "unable to read Ethernet address\n");
   2698 			goto out;
   2699 		}
   2700 	}
   2701 
   2702 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2703 	    ether_sprintf(enaddr));
   2704 
   2705 	/*
   2706 	 * Read the config info from the EEPROM, and set up various
   2707 	 * bits in the control registers based on their contents.
   2708 	 */
   2709 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2710 	if (pn != NULL) {
   2711 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2712 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2713 	} else {
   2714 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2715 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2716 			goto out;
   2717 		}
   2718 	}
   2719 
   2720 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2721 	if (pn != NULL) {
   2722 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2723 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2724 	} else {
   2725 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2726 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2727 			goto out;
   2728 		}
   2729 	}
   2730 
   2731 	/* check for WM_F_WOL */
   2732 	switch (sc->sc_type) {
   2733 	case WM_T_82542_2_0:
   2734 	case WM_T_82542_2_1:
   2735 	case WM_T_82543:
   2736 		/* dummy? */
   2737 		eeprom_data = 0;
   2738 		apme_mask = NVM_CFG3_APME;
   2739 		break;
   2740 	case WM_T_82544:
   2741 		apme_mask = NVM_CFG2_82544_APM_EN;
   2742 		eeprom_data = cfg2;
   2743 		break;
   2744 	case WM_T_82546:
   2745 	case WM_T_82546_3:
   2746 	case WM_T_82571:
   2747 	case WM_T_82572:
   2748 	case WM_T_82573:
   2749 	case WM_T_82574:
   2750 	case WM_T_82583:
   2751 	case WM_T_80003:
   2752 	case WM_T_82575:
   2753 	case WM_T_82576:
   2754 		apme_mask = NVM_CFG3_APME;
   2755 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2756 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2757 		break;
   2758 	case WM_T_82580:
   2759 	case WM_T_I350:
   2760 	case WM_T_I354:
   2761 	case WM_T_I210:
   2762 	case WM_T_I211:
   2763 		apme_mask = NVM_CFG3_APME;
   2764 		wm_nvm_read(sc,
   2765 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2766 		    1, &eeprom_data);
   2767 		break;
   2768 	case WM_T_ICH8:
   2769 	case WM_T_ICH9:
   2770 	case WM_T_ICH10:
   2771 	case WM_T_PCH:
   2772 	case WM_T_PCH2:
   2773 	case WM_T_PCH_LPT:
   2774 	case WM_T_PCH_SPT:
   2775 	case WM_T_PCH_CNP:
   2776 		/* Already checked before wm_reset () */
   2777 		apme_mask = eeprom_data = 0;
   2778 		break;
   2779 	default: /* XXX 82540 */
   2780 		apme_mask = NVM_CFG3_APME;
   2781 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2782 		break;
   2783 	}
   2784 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2785 	if ((eeprom_data & apme_mask) != 0)
   2786 		sc->sc_flags |= WM_F_WOL;
   2787 
   2788 	/*
   2789 	 * We have the eeprom settings, now apply the special cases
   2790 	 * where the eeprom may be wrong or the board won't support
   2791 	 * wake on lan on a particular port
   2792 	 */
   2793 	switch (sc->sc_pcidevid) {
   2794 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2795 		sc->sc_flags &= ~WM_F_WOL;
   2796 		break;
   2797 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2798 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2799 		/* Wake events only supported on port A for dual fiber
   2800 		 * regardless of eeprom setting */
   2801 		if (sc->sc_funcid == 1)
   2802 			sc->sc_flags &= ~WM_F_WOL;
   2803 		break;
   2804 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2805 		/* If quad port adapter, disable WoL on all but port A */
   2806 		if (sc->sc_funcid != 0)
   2807 			sc->sc_flags &= ~WM_F_WOL;
   2808 		break;
   2809 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2810 		/* Wake events only supported on port A for dual fiber
   2811 		 * regardless of eeprom setting */
   2812 		if (sc->sc_funcid == 1)
   2813 			sc->sc_flags &= ~WM_F_WOL;
   2814 		break;
   2815 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2817 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2818 		/* If quad port adapter, disable WoL on all but port A */
   2819 		if (sc->sc_funcid != 0)
   2820 			sc->sc_flags &= ~WM_F_WOL;
   2821 		break;
   2822 	}
   2823 
   2824 	if (sc->sc_type >= WM_T_82575) {
   2825 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2826 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2827 			    nvmword);
   2828 			if ((sc->sc_type == WM_T_82575) ||
   2829 			    (sc->sc_type == WM_T_82576)) {
   2830 				/* Check NVM for autonegotiation */
   2831 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2832 				    != 0)
   2833 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2834 			}
   2835 			if ((sc->sc_type == WM_T_82575) ||
   2836 			    (sc->sc_type == WM_T_I350)) {
   2837 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2838 					sc->sc_flags |= WM_F_MAS;
   2839 			}
   2840 		}
   2841 	}
   2842 
   2843 	/*
   2844 	 * XXX need special handling for some multiple port cards
   2845 	 * to disable a paticular port.
   2846 	 */
   2847 
   2848 	if (sc->sc_type >= WM_T_82544) {
   2849 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2850 		if (pn != NULL) {
   2851 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2852 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2853 		} else {
   2854 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2855 				aprint_error_dev(sc->sc_dev,
   2856 				    "unable to read SWDPIN\n");
   2857 				goto out;
   2858 			}
   2859 		}
   2860 	}
   2861 
   2862 	if (cfg1 & NVM_CFG1_ILOS)
   2863 		sc->sc_ctrl |= CTRL_ILOS;
   2864 
   2865 	/*
   2866 	 * XXX
   2867 	 * This code isn't correct because pin 2 and 3 are located
   2868 	 * in different position on newer chips. Check all datasheet.
   2869 	 *
   2870 	 * Until resolve this problem, check if a chip < 82580
   2871 	 */
   2872 	if (sc->sc_type <= WM_T_82580) {
   2873 		if (sc->sc_type >= WM_T_82544) {
   2874 			sc->sc_ctrl |=
   2875 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2876 			    CTRL_SWDPIO_SHIFT;
   2877 			sc->sc_ctrl |=
   2878 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2879 			    CTRL_SWDPINS_SHIFT;
   2880 		} else {
   2881 			sc->sc_ctrl |=
   2882 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2883 			    CTRL_SWDPIO_SHIFT;
   2884 		}
   2885 	}
   2886 
   2887 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2888 		wm_nvm_read(sc,
   2889 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2890 		    1, &nvmword);
   2891 		if (nvmword & NVM_CFG3_ILOS)
   2892 			sc->sc_ctrl |= CTRL_ILOS;
   2893 	}
   2894 
   2895 #if 0
   2896 	if (sc->sc_type >= WM_T_82544) {
   2897 		if (cfg1 & NVM_CFG1_IPS0)
   2898 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2899 		if (cfg1 & NVM_CFG1_IPS1)
   2900 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2901 		sc->sc_ctrl_ext |=
   2902 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2903 		    CTRL_EXT_SWDPIO_SHIFT;
   2904 		sc->sc_ctrl_ext |=
   2905 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2906 		    CTRL_EXT_SWDPINS_SHIFT;
   2907 	} else {
   2908 		sc->sc_ctrl_ext |=
   2909 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2910 		    CTRL_EXT_SWDPIO_SHIFT;
   2911 	}
   2912 #endif
   2913 
   2914 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2915 #if 0
   2916 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2917 #endif
   2918 
   2919 	if (sc->sc_type == WM_T_PCH) {
   2920 		uint16_t val;
   2921 
   2922 		/* Save the NVM K1 bit setting */
   2923 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2924 
   2925 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2926 			sc->sc_nvm_k1_enabled = 1;
   2927 		else
   2928 			sc->sc_nvm_k1_enabled = 0;
   2929 	}
   2930 
   2931 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2932 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2933 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2934 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2935 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2936 	    || sc->sc_type == WM_T_82573
   2937 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2938 		/* Copper only */
   2939 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2940 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2941 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2942 	    || (sc->sc_type ==WM_T_I211)) {
   2943 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2944 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2945 		switch (link_mode) {
   2946 		case CTRL_EXT_LINK_MODE_1000KX:
   2947 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2948 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2949 			break;
   2950 		case CTRL_EXT_LINK_MODE_SGMII:
   2951 			if (wm_sgmii_uses_mdio(sc)) {
   2952 				aprint_normal_dev(sc->sc_dev,
   2953 				    "SGMII(MDIO)\n");
   2954 				sc->sc_flags |= WM_F_SGMII;
   2955 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2956 				break;
   2957 			}
   2958 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2959 			/*FALLTHROUGH*/
   2960 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2961 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2962 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2963 				if (link_mode
   2964 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2965 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2966 					sc->sc_flags |= WM_F_SGMII;
   2967 					aprint_verbose_dev(sc->sc_dev,
   2968 					    "SGMII\n");
   2969 				} else {
   2970 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2971 					aprint_verbose_dev(sc->sc_dev,
   2972 					    "SERDES\n");
   2973 				}
   2974 				break;
   2975 			}
   2976 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2977 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2978 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2979 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2980 				sc->sc_flags |= WM_F_SGMII;
   2981 			}
   2982 			/* Do not change link mode for 100BaseFX */
   2983 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2984 				break;
   2985 
   2986 			/* Change current link mode setting */
   2987 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2988 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2989 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2990 			else
   2991 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2992 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2993 			break;
   2994 		case CTRL_EXT_LINK_MODE_GMII:
   2995 		default:
   2996 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2997 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2998 			break;
   2999 		}
   3000 
   3001 		reg &= ~CTRL_EXT_I2C_ENA;
   3002 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   3003 			reg |= CTRL_EXT_I2C_ENA;
   3004 		else
   3005 			reg &= ~CTRL_EXT_I2C_ENA;
   3006 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3007 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3008 			if (!wm_sgmii_uses_mdio(sc))
   3009 				wm_gmii_setup_phytype(sc, 0, 0);
   3010 			wm_reset_mdicnfg_82580(sc);
   3011 		}
   3012 	} else if (sc->sc_type < WM_T_82543 ||
   3013 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3014 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3015 			aprint_error_dev(sc->sc_dev,
   3016 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3017 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3018 		}
   3019 	} else {
   3020 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3021 			aprint_error_dev(sc->sc_dev,
   3022 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3023 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3024 		}
   3025 	}
   3026 
   3027 	if (sc->sc_type >= WM_T_PCH2)
   3028 		sc->sc_flags |= WM_F_EEE;
   3029 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3030 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3031 		/* XXX: Need special handling for I354. (not yet) */
   3032 		if (sc->sc_type != WM_T_I354)
   3033 			sc->sc_flags |= WM_F_EEE;
   3034 	}
   3035 
   3036 	/*
   3037 	 * The I350 has a bug where it always strips the CRC whether
   3038 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3039 	 */
   3040 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3041 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3042 		sc->sc_flags |= WM_F_CRC_STRIP;
   3043 
   3044 	/* Set device properties (macflags) */
   3045 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3046 
   3047 	if (sc->sc_flags != 0) {
   3048 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3049 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3050 	}
   3051 
   3052 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3053 
   3054 	/* Initialize the media structures accordingly. */
   3055 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3056 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3057 	else
   3058 		wm_tbi_mediainit(sc); /* All others */
   3059 
   3060 	ifp = &sc->sc_ethercom.ec_if;
   3061 	xname = device_xname(sc->sc_dev);
   3062 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3063 	ifp->if_softc = sc;
   3064 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3065 	ifp->if_extflags = IFEF_MPSAFE;
   3066 	ifp->if_ioctl = wm_ioctl;
   3067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3068 		ifp->if_start = wm_nq_start;
   3069 		/*
   3070 		 * When the number of CPUs is one and the controller can use
   3071 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   3072 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   3073 		 * and the other is used for link status changing.
   3074 		 * In this situation, wm_nq_transmit() is disadvantageous
   3075 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3076 		 */
   3077 		if (wm_is_using_multiqueue(sc))
   3078 			ifp->if_transmit = wm_nq_transmit;
   3079 	} else {
   3080 		ifp->if_start = wm_start;
   3081 		/*
   3082 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3083 		 * described above.
   3084 		 */
   3085 		if (wm_is_using_multiqueue(sc))
   3086 			ifp->if_transmit = wm_transmit;
   3087 	}
   3088 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
   3089 	ifp->if_init = wm_init;
   3090 	ifp->if_stop = wm_stop;
   3091 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3092 	IFQ_SET_READY(&ifp->if_snd);
   3093 
   3094 	/* Check for jumbo frame */
   3095 	switch (sc->sc_type) {
   3096 	case WM_T_82573:
   3097 		/* XXX limited to 9234 if ASPM is disabled */
   3098 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3099 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3100 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3101 		break;
   3102 	case WM_T_82571:
   3103 	case WM_T_82572:
   3104 	case WM_T_82574:
   3105 	case WM_T_82583:
   3106 	case WM_T_82575:
   3107 	case WM_T_82576:
   3108 	case WM_T_82580:
   3109 	case WM_T_I350:
   3110 	case WM_T_I354:
   3111 	case WM_T_I210:
   3112 	case WM_T_I211:
   3113 	case WM_T_80003:
   3114 	case WM_T_ICH9:
   3115 	case WM_T_ICH10:
   3116 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3117 	case WM_T_PCH_LPT:
   3118 	case WM_T_PCH_SPT:
   3119 	case WM_T_PCH_CNP:
   3120 		/* XXX limited to 9234 */
   3121 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3122 		break;
   3123 	case WM_T_PCH:
   3124 		/* XXX limited to 4096 */
   3125 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_82542_2_0:
   3128 	case WM_T_82542_2_1:
   3129 	case WM_T_ICH8:
   3130 		/* No support for jumbo frame */
   3131 		break;
   3132 	default:
   3133 		/* ETHER_MAX_LEN_JUMBO */
   3134 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3135 		break;
   3136 	}
   3137 
   3138 	/* If we're a i82543 or greater, we can support VLANs. */
   3139 	if (sc->sc_type >= WM_T_82543) {
   3140 		sc->sc_ethercom.ec_capabilities |=
   3141 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3142 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3143 	}
   3144 
   3145 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3146 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3147 
   3148 	/*
   3149 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3150 	 * on i82543 and later.
   3151 	 */
   3152 	if (sc->sc_type >= WM_T_82543) {
   3153 		ifp->if_capabilities |=
   3154 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3155 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3156 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3157 		    IFCAP_CSUM_TCPv6_Tx |
   3158 		    IFCAP_CSUM_UDPv6_Tx;
   3159 	}
   3160 
   3161 	/*
   3162 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3163 	 *
   3164 	 *	82541GI (8086:1076) ... no
   3165 	 *	82572EI (8086:10b9) ... yes
   3166 	 */
   3167 	if (sc->sc_type >= WM_T_82571) {
   3168 		ifp->if_capabilities |=
   3169 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3170 	}
   3171 
   3172 	/*
   3173 	 * If we're a i82544 or greater (except i82547), we can do
   3174 	 * TCP segmentation offload.
   3175 	 */
   3176 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3177 		ifp->if_capabilities |= IFCAP_TSOv4;
   3178 
   3179 	if (sc->sc_type >= WM_T_82571)
   3180 		ifp->if_capabilities |= IFCAP_TSOv6;
   3181 
   3182 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3183 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3184 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3185 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3186 
   3187 	/* Attach the interface. */
   3188 	if_initialize(ifp);
   3189 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3190 	ether_ifattach(ifp, enaddr);
   3191 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3192 	if_register(ifp);
   3193 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3194 	    RND_FLAG_DEFAULT);
   3195 
   3196 #ifdef WM_EVENT_COUNTERS
   3197 	/* Attach event counters. */
   3198 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3199 	    NULL, xname, "linkintr");
   3200 
   3201 	if (sc->sc_type >= WM_T_82542_2_1) {
   3202 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3203 		    NULL, xname, "tx_xoff");
   3204 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3205 		    NULL, xname, "tx_xon");
   3206 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3207 		    NULL, xname, "rx_xoff");
   3208 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3209 		    NULL, xname, "rx_xon");
   3210 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3211 		    NULL, xname, "rx_macctl");
   3212 	}
   3213 
   3214 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3215 	    NULL, xname, "CRC Error");
   3216 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3217 	    NULL, xname, "Symbol Error");
   3218 
   3219 	if (sc->sc_type >= WM_T_82543) {
   3220 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3221 		    NULL, xname, "Alignment Error");
   3222 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3223 		    NULL, xname, "Receive Error");
   3224 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3225 		    NULL, xname, "Carrier Extension Error");
   3226 	}
   3227 
   3228 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3229 	    NULL, xname, "Missed Packets");
   3230 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3231 	    NULL, xname, "Collision");
   3232 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3233 	    NULL, xname, "Sequence Error");
   3234 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3235 	    NULL, xname, "Receive Length Error");
   3236 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Single Collision");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Excessive Collisions");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Multiple Collision");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Late Collisions");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Defer");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Good Packets Rx");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Broadcast Packets Rx");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Multicast Packets Rx");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Good Packets Tx");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Good Octets Rx");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Good Octets Tx");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Rx No Buffers");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3261 	    NULL, xname, "Rx Undersize");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Rx Fragment");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Rx Oversize");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Rx Jabber");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Total Octets Rx");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Total Octets Tx");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Total Packets Rx");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Total Packets Tx");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Multicast Packets Tx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3279 	    NULL, xname, "Broadcast Packets Tx Count");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Packets Rx (64 bytes)");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Packets Rx (255-511 bytes)");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3291 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3292 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "Packets Tx (64 bytes)");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Interrupt Assertion");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3315 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3322 	if (sc->sc_type >= WM_T_82543) {
   3323 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3324 		    NULL, xname, "Tx with No CRS");
   3325 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3326 		    NULL, xname, "TCP Segmentation Context Tx");
   3327 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3328 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3329 	}
   3330 	if (sc->sc_type >= WM_T_82540) {
   3331 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "Management Packets RX");
   3333 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3334 		    NULL, xname, "Management Packets Dropped");
   3335 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "Management Packets TX");
   3337 	}
   3338 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3339 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3340 		    NULL, xname, "BMC2OS Packets received by host");
   3341 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3342 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3343 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3345 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3346 		    NULL, xname, "OS2BMC Packets received by BMC");
   3347 	}
   3348 #endif /* WM_EVENT_COUNTERS */
   3349 
   3350 	sc->sc_txrx_use_workqueue = false;
   3351 
   3352 	if (wm_phy_need_linkdown_discard(sc)) {
   3353 		DPRINTF(sc, WM_DEBUG_LINK,
   3354 		    ("%s: %s: Set linkdown discard flag\n",
   3355 			device_xname(sc->sc_dev), __func__));
   3356 		wm_set_linkdown_discard(sc);
   3357 	}
   3358 
   3359 	wm_init_sysctls(sc);
   3360 
   3361 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3362 		pmf_class_network_register(self, ifp);
   3363 	else
   3364 		aprint_error_dev(self, "couldn't establish power handler\n");
   3365 
   3366 	sc->sc_flags |= WM_F_ATTACHED;
   3367 out:
   3368 	return;
   3369 }
   3370 
   3371 /* The detach function (ca_detach) */
   3372 static int
   3373 wm_detach(device_t self, int flags __unused)
   3374 {
   3375 	struct wm_softc *sc = device_private(self);
   3376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3377 	int i;
   3378 
   3379 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3380 		return 0;
   3381 
   3382 	/* Stop the interface. Callouts are stopped in it. */
   3383 	IFNET_LOCK(ifp);
   3384 	sc->sc_dying = true;
   3385 	wm_stop(ifp, 1);
   3386 	IFNET_UNLOCK(ifp);
   3387 
   3388 	pmf_device_deregister(self);
   3389 
   3390 	sysctl_teardown(&sc->sc_sysctllog);
   3391 
   3392 #ifdef WM_EVENT_COUNTERS
   3393 	evcnt_detach(&sc->sc_ev_linkintr);
   3394 
   3395 	if (sc->sc_type >= WM_T_82542_2_1) {
   3396 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3397 		evcnt_detach(&sc->sc_ev_tx_xon);
   3398 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3399 		evcnt_detach(&sc->sc_ev_rx_xon);
   3400 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3401 	}
   3402 
   3403 	evcnt_detach(&sc->sc_ev_crcerrs);
   3404 	evcnt_detach(&sc->sc_ev_symerrc);
   3405 
   3406 	if (sc->sc_type >= WM_T_82543) {
   3407 		evcnt_detach(&sc->sc_ev_algnerrc);
   3408 		evcnt_detach(&sc->sc_ev_rxerrc);
   3409 		evcnt_detach(&sc->sc_ev_cexterr);
   3410 	}
   3411 	evcnt_detach(&sc->sc_ev_mpc);
   3412 	evcnt_detach(&sc->sc_ev_colc);
   3413 	evcnt_detach(&sc->sc_ev_sec);
   3414 	evcnt_detach(&sc->sc_ev_rlec);
   3415 	evcnt_detach(&sc->sc_ev_scc);
   3416 	evcnt_detach(&sc->sc_ev_ecol);
   3417 	evcnt_detach(&sc->sc_ev_mcc);
   3418 	evcnt_detach(&sc->sc_ev_latecol);
   3419 	evcnt_detach(&sc->sc_ev_dc);
   3420 	evcnt_detach(&sc->sc_ev_gprc);
   3421 	evcnt_detach(&sc->sc_ev_bprc);
   3422 	evcnt_detach(&sc->sc_ev_mprc);
   3423 	evcnt_detach(&sc->sc_ev_gptc);
   3424 	evcnt_detach(&sc->sc_ev_gorc);
   3425 	evcnt_detach(&sc->sc_ev_gotc);
   3426 	evcnt_detach(&sc->sc_ev_rnbc);
   3427 	evcnt_detach(&sc->sc_ev_ruc);
   3428 	evcnt_detach(&sc->sc_ev_rfc);
   3429 	evcnt_detach(&sc->sc_ev_roc);
   3430 	evcnt_detach(&sc->sc_ev_rjc);
   3431 	evcnt_detach(&sc->sc_ev_tor);
   3432 	evcnt_detach(&sc->sc_ev_tot);
   3433 	evcnt_detach(&sc->sc_ev_tpr);
   3434 	evcnt_detach(&sc->sc_ev_tpt);
   3435 	evcnt_detach(&sc->sc_ev_mptc);
   3436 	evcnt_detach(&sc->sc_ev_bptc);
   3437 	evcnt_detach(&sc->sc_ev_prc64);
   3438 	evcnt_detach(&sc->sc_ev_prc127);
   3439 	evcnt_detach(&sc->sc_ev_prc255);
   3440 	evcnt_detach(&sc->sc_ev_prc511);
   3441 	evcnt_detach(&sc->sc_ev_prc1023);
   3442 	evcnt_detach(&sc->sc_ev_prc1522);
   3443 	evcnt_detach(&sc->sc_ev_ptc64);
   3444 	evcnt_detach(&sc->sc_ev_ptc127);
   3445 	evcnt_detach(&sc->sc_ev_ptc255);
   3446 	evcnt_detach(&sc->sc_ev_ptc511);
   3447 	evcnt_detach(&sc->sc_ev_ptc1023);
   3448 	evcnt_detach(&sc->sc_ev_ptc1522);
   3449 	evcnt_detach(&sc->sc_ev_iac);
   3450 	evcnt_detach(&sc->sc_ev_icrxptc);
   3451 	evcnt_detach(&sc->sc_ev_icrxatc);
   3452 	evcnt_detach(&sc->sc_ev_ictxptc);
   3453 	evcnt_detach(&sc->sc_ev_ictxact);
   3454 	evcnt_detach(&sc->sc_ev_ictxqec);
   3455 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3456 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxoc);
   3458 	if (sc->sc_type >= WM_T_82543) {
   3459 		evcnt_detach(&sc->sc_ev_tncrs);
   3460 		evcnt_detach(&sc->sc_ev_tsctc);
   3461 		evcnt_detach(&sc->sc_ev_tsctfc);
   3462 	}
   3463 	if (sc->sc_type >= WM_T_82540) {
   3464 		evcnt_detach(&sc->sc_ev_mgtprc);
   3465 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3466 		evcnt_detach(&sc->sc_ev_mgtptc);
   3467 	}
   3468 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3469 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3470 		evcnt_detach(&sc->sc_ev_o2bspc);
   3471 		evcnt_detach(&sc->sc_ev_b2ospc);
   3472 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3473 	}
   3474 #endif /* WM_EVENT_COUNTERS */
   3475 
   3476 	rnd_detach_source(&sc->rnd_source);
   3477 
   3478 	/* Tell the firmware about the release */
   3479 	mutex_enter(sc->sc_core_lock);
   3480 	wm_release_manageability(sc);
   3481 	wm_release_hw_control(sc);
   3482 	wm_enable_wakeup(sc);
   3483 	mutex_exit(sc->sc_core_lock);
   3484 
   3485 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3486 
   3487 	ether_ifdetach(ifp);
   3488 	if_detach(ifp);
   3489 	if_percpuq_destroy(sc->sc_ipq);
   3490 
   3491 	/* Delete all remaining media. */
   3492 	ifmedia_fini(&sc->sc_mii.mii_media);
   3493 
   3494 	/* Unload RX dmamaps and free mbufs */
   3495 	for (i = 0; i < sc->sc_nqueues; i++) {
   3496 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3497 		mutex_enter(rxq->rxq_lock);
   3498 		wm_rxdrain(rxq);
   3499 		mutex_exit(rxq->rxq_lock);
   3500 	}
   3501 	/* Must unlock here */
   3502 
   3503 	/* Disestablish the interrupt handler */
   3504 	for (i = 0; i < sc->sc_nintrs; i++) {
   3505 		if (sc->sc_ihs[i] != NULL) {
   3506 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3507 			sc->sc_ihs[i] = NULL;
   3508 		}
   3509 	}
   3510 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3511 
   3512 	/* wm_stop() ensured that the workqueues are stopped. */
   3513 	workqueue_destroy(sc->sc_queue_wq);
   3514 	workqueue_destroy(sc->sc_reset_wq);
   3515 
   3516 	for (i = 0; i < sc->sc_nqueues; i++)
   3517 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3518 
   3519 	wm_free_txrx_queues(sc);
   3520 
   3521 	/* Unmap the registers */
   3522 	if (sc->sc_ss) {
   3523 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3524 		sc->sc_ss = 0;
   3525 	}
   3526 	if (sc->sc_ios) {
   3527 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3528 		sc->sc_ios = 0;
   3529 	}
   3530 	if (sc->sc_flashs) {
   3531 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3532 		sc->sc_flashs = 0;
   3533 	}
   3534 
   3535 	if (sc->sc_core_lock)
   3536 		mutex_obj_free(sc->sc_core_lock);
   3537 	if (sc->sc_ich_phymtx)
   3538 		mutex_obj_free(sc->sc_ich_phymtx);
   3539 	if (sc->sc_ich_nvmmtx)
   3540 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3541 
   3542 	return 0;
   3543 }
   3544 
   3545 static bool
   3546 wm_suspend(device_t self, const pmf_qual_t *qual)
   3547 {
   3548 	struct wm_softc *sc = device_private(self);
   3549 
   3550 	wm_release_manageability(sc);
   3551 	wm_release_hw_control(sc);
   3552 	wm_enable_wakeup(sc);
   3553 
   3554 	return true;
   3555 }
   3556 
   3557 static bool
   3558 wm_resume(device_t self, const pmf_qual_t *qual)
   3559 {
   3560 	struct wm_softc *sc = device_private(self);
   3561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3562 	pcireg_t reg;
   3563 	char buf[256];
   3564 
   3565 	reg = CSR_READ(sc, WMREG_WUS);
   3566 	if (reg != 0) {
   3567 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3568 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3569 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3570 	}
   3571 
   3572 	if (sc->sc_type >= WM_T_PCH2)
   3573 		wm_resume_workarounds_pchlan(sc);
   3574 	IFNET_LOCK(ifp);
   3575 	if ((ifp->if_flags & IFF_UP) == 0) {
   3576 		/* >= PCH_SPT hardware workaround before reset. */
   3577 		if (sc->sc_type >= WM_T_PCH_SPT)
   3578 			wm_flush_desc_rings(sc);
   3579 
   3580 		wm_reset(sc);
   3581 		/* Non-AMT based hardware can now take control from firmware */
   3582 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3583 			wm_get_hw_control(sc);
   3584 		wm_init_manageability(sc);
   3585 	} else {
   3586 		/*
   3587 		 * We called pmf_class_network_register(), so if_init() is
   3588 		 * automatically called when IFF_UP. wm_reset(),
   3589 		 * wm_get_hw_control() and wm_init_manageability() are called
   3590 		 * via wm_init().
   3591 		 */
   3592 	}
   3593 	IFNET_UNLOCK(ifp);
   3594 
   3595 	return true;
   3596 }
   3597 
   3598 /*
   3599  * wm_watchdog:
   3600  *
   3601  *	Watchdog checker.
   3602  */
   3603 static bool
   3604 wm_watchdog(struct ifnet *ifp)
   3605 {
   3606 	int qid;
   3607 	struct wm_softc *sc = ifp->if_softc;
   3608 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3609 
   3610 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3611 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3612 
   3613 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3614 	}
   3615 
   3616 #ifdef WM_DEBUG
   3617 	if (sc->sc_trigger_reset) {
   3618 		/* debug operation, no need for atomicity or reliability */
   3619 		sc->sc_trigger_reset = 0;
   3620 		hang_queue++;
   3621 	}
   3622 #endif
   3623 
   3624 	if (hang_queue == 0)
   3625 		return true;
   3626 
   3627 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3628 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3629 
   3630 	return false;
   3631 }
   3632 
   3633 /*
   3634  * Perform an interface watchdog reset.
   3635  */
   3636 static void
   3637 wm_handle_reset_work(struct work *work, void *arg)
   3638 {
   3639 	struct wm_softc * const sc = arg;
   3640 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3641 
   3642 	/* Don't want ioctl operations to happen */
   3643 	IFNET_LOCK(ifp);
   3644 
   3645 	/* reset the interface. */
   3646 	wm_init(ifp);
   3647 
   3648 	IFNET_UNLOCK(ifp);
   3649 
   3650 	/*
   3651 	 * There are still some upper layer processing which call
   3652 	 * ifp->if_start(). e.g. ALTQ or one CPU system
   3653 	 */
   3654 	/* Try to get more packets going. */
   3655 	ifp->if_start(ifp);
   3656 
   3657 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3658 }
   3659 
   3660 
   3661 static void
   3662 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3663 {
   3664 
   3665 	mutex_enter(txq->txq_lock);
   3666 	if (txq->txq_sending &&
   3667 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3668 		wm_watchdog_txq_locked(ifp, txq, hang);
   3669 
   3670 	mutex_exit(txq->txq_lock);
   3671 }
   3672 
   3673 static void
   3674 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3675     uint16_t *hang)
   3676 {
   3677 	struct wm_softc *sc = ifp->if_softc;
   3678 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3679 
   3680 	KASSERT(mutex_owned(txq->txq_lock));
   3681 
   3682 	/*
   3683 	 * Since we're using delayed interrupts, sweep up
   3684 	 * before we report an error.
   3685 	 */
   3686 	wm_txeof(txq, UINT_MAX);
   3687 
   3688 	if (txq->txq_sending)
   3689 		*hang |= __BIT(wmq->wmq_id);
   3690 
   3691 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3692 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3693 		    device_xname(sc->sc_dev));
   3694 	} else {
   3695 #ifdef WM_DEBUG
   3696 		int i, j;
   3697 		struct wm_txsoft *txs;
   3698 #endif
   3699 		log(LOG_ERR,
   3700 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3701 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3702 		    txq->txq_next);
   3703 		if_statinc(ifp, if_oerrors);
   3704 #ifdef WM_DEBUG
   3705 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3706 		     i = WM_NEXTTXS(txq, i)) {
   3707 			txs = &txq->txq_soft[i];
   3708 			printf("txs %d tx %d -> %d\n",
   3709 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3710 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3711 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3712 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3713 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3714 					printf("\t %#08x%08x\n",
   3715 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3716 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3717 				} else {
   3718 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3719 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3720 					    txq->txq_descs[j].wtx_addr.wa_low);
   3721 					printf("\t %#04x%02x%02x%08x\n",
   3722 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3723 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3724 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3725 					    txq->txq_descs[j].wtx_cmdlen);
   3726 				}
   3727 				if (j == txs->txs_lastdesc)
   3728 					break;
   3729 			}
   3730 		}
   3731 #endif
   3732 	}
   3733 }
   3734 
   3735 /*
   3736  * wm_tick:
   3737  *
   3738  *	One second timer, used to check link status, sweep up
   3739  *	completed transmit jobs, etc.
   3740  */
   3741 static void
   3742 wm_tick(void *arg)
   3743 {
   3744 	struct wm_softc *sc = arg;
   3745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3746 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc,  sec, rlec, rxerrc,
   3747 	    cexterr;
   3748 
   3749 	mutex_enter(sc->sc_core_lock);
   3750 
   3751 	if (sc->sc_core_stopping) {
   3752 		mutex_exit(sc->sc_core_lock);
   3753 		return;
   3754 	}
   3755 
   3756 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3757 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3758 	mpc = CSR_READ(sc, WMREG_MPC);
   3759 	colc = CSR_READ(sc, WMREG_COLC);
   3760 	sec = CSR_READ(sc, WMREG_SEC);
   3761 	rlec = CSR_READ(sc, WMREG_RLEC);
   3762 
   3763 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3764 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3765 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3766 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3767 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3768 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3769 
   3770 	if (sc->sc_type >= WM_T_82542_2_1) {
   3771 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3772 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3773 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3774 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3775 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3776 	}
   3777 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3778 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3786 
   3787 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3788 	    CSR_READ(sc, WMREG_GORCL) + CSR_READ(sc, WMREG_GORCH));
   3789 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3790 	    CSR_READ(sc, WMREG_GOTCL) + CSR_READ(sc, WMREG_GOTCH));
   3791 
   3792 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3793 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3794 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3795 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3797 
   3798 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3799 	    CSR_READ(sc, WMREG_TORL) + CSR_READ(sc, WMREG_TORH));
   3800 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3801 	    CSR_READ(sc, WMREG_TOTL) + CSR_READ(sc, WMREG_TOTH));
   3802 
   3803 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3805 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3806 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3807 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3808 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3810 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3811 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3812 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3813 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3815 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3816 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3817 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3818 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3819 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3820 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3821 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3822 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3823 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3824 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3825 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3826 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3827 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3828 
   3829 	if (sc->sc_type >= WM_T_82543) {
   3830 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3831 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3832 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3833 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3834 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3835 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3836 
   3837 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3838 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3839 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3840 	} else
   3841 		algnerrc = rxerrc = cexterr = 0;
   3842 
   3843 	if (sc->sc_type >= WM_T_82540) {
   3844 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3845 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3846 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3847 	}
   3848 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3849 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3850 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3851 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3852 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3853 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3854 	}
   3855 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3856 	if_statadd_ref(nsr, if_collisions, colc);
   3857 	if_statadd_ref(nsr, if_ierrors,
   3858 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3859 	/*
   3860 	 * WMREG_RNBC is incremented when there are no available buffers in
   3861 	 * host memory. It does not mean the number of dropped packets, because
   3862 	 * an Ethernet controller can receive packets in such case if there is
   3863 	 * space in the phy's FIFO.
   3864 	 *
   3865 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3866 	 * own EVCNT instead of if_iqdrops.
   3867 	 */
   3868 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3869 	IF_STAT_PUTREF(ifp);
   3870 
   3871 	if (sc->sc_flags & WM_F_HAS_MII)
   3872 		mii_tick(&sc->sc_mii);
   3873 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3874 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3875 		wm_serdes_tick(sc);
   3876 	else
   3877 		wm_tbi_tick(sc);
   3878 
   3879 	mutex_exit(sc->sc_core_lock);
   3880 
   3881 	if (wm_watchdog(ifp))
   3882 		callout_schedule(&sc->sc_tick_ch, hz);
   3883 }
   3884 
   3885 static int
   3886 wm_ifflags_cb(struct ethercom *ec)
   3887 {
   3888 	struct ifnet *ifp = &ec->ec_if;
   3889 	struct wm_softc *sc = ifp->if_softc;
   3890 	u_short iffchange;
   3891 	int ecchange;
   3892 	bool needreset = false;
   3893 	int rc = 0;
   3894 
   3895 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3896 		device_xname(sc->sc_dev), __func__));
   3897 
   3898 	KASSERT(IFNET_LOCKED(ifp));
   3899 
   3900 	mutex_enter(sc->sc_core_lock);
   3901 
   3902 	/*
   3903 	 * Check for if_flags.
   3904 	 * Main usage is to prevent linkdown when opening bpf.
   3905 	 */
   3906 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3907 	sc->sc_if_flags = ifp->if_flags;
   3908 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3909 		needreset = true;
   3910 		goto ec;
   3911 	}
   3912 
   3913 	/* iff related updates */
   3914 	if ((iffchange & IFF_PROMISC) != 0)
   3915 		wm_set_filter(sc);
   3916 
   3917 	wm_set_vlan(sc);
   3918 
   3919 ec:
   3920 	/* Check for ec_capenable. */
   3921 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3922 	sc->sc_ec_capenable = ec->ec_capenable;
   3923 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3924 		needreset = true;
   3925 		goto out;
   3926 	}
   3927 
   3928 	/* ec related updates */
   3929 	wm_set_eee(sc);
   3930 
   3931 out:
   3932 	if (needreset)
   3933 		rc = ENETRESET;
   3934 	mutex_exit(sc->sc_core_lock);
   3935 
   3936 	return rc;
   3937 }
   3938 
   3939 static bool
   3940 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3941 {
   3942 
   3943 	switch (sc->sc_phytype) {
   3944 	case WMPHY_82577: /* ihphy */
   3945 	case WMPHY_82578: /* atphy */
   3946 	case WMPHY_82579: /* ihphy */
   3947 	case WMPHY_I217: /* ihphy */
   3948 	case WMPHY_82580: /* ihphy */
   3949 	case WMPHY_I350: /* ihphy */
   3950 		return true;
   3951 	default:
   3952 		return false;
   3953 	}
   3954 }
   3955 
   3956 static void
   3957 wm_set_linkdown_discard(struct wm_softc *sc)
   3958 {
   3959 
   3960 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3961 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3962 
   3963 		mutex_enter(txq->txq_lock);
   3964 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3965 		mutex_exit(txq->txq_lock);
   3966 	}
   3967 }
   3968 
   3969 static void
   3970 wm_clear_linkdown_discard(struct wm_softc *sc)
   3971 {
   3972 
   3973 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3974 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3975 
   3976 		mutex_enter(txq->txq_lock);
   3977 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3978 		mutex_exit(txq->txq_lock);
   3979 	}
   3980 }
   3981 
   3982 /*
   3983  * wm_ioctl:		[ifnet interface function]
   3984  *
   3985  *	Handle control requests from the operator.
   3986  */
   3987 static int
   3988 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3989 {
   3990 	struct wm_softc *sc = ifp->if_softc;
   3991 	struct ifreq *ifr = (struct ifreq *)data;
   3992 	struct ifaddr *ifa = (struct ifaddr *)data;
   3993 	struct sockaddr_dl *sdl;
   3994 	int error;
   3995 
   3996 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3997 		device_xname(sc->sc_dev), __func__));
   3998 
   3999 	switch (cmd) {
   4000 	case SIOCADDMULTI:
   4001 	case SIOCDELMULTI:
   4002 		break;
   4003 	default:
   4004 		KASSERT(IFNET_LOCKED(ifp));
   4005 	}
   4006 
   4007 	switch (cmd) {
   4008 	case SIOCSIFMEDIA:
   4009 		mutex_enter(sc->sc_core_lock);
   4010 		/* Flow control requires full-duplex mode. */
   4011 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4012 		    (ifr->ifr_media & IFM_FDX) == 0)
   4013 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4014 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4015 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4016 				/* We can do both TXPAUSE and RXPAUSE. */
   4017 				ifr->ifr_media |=
   4018 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4019 			}
   4020 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4021 		}
   4022 		mutex_exit(sc->sc_core_lock);
   4023 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4024 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4025 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4026 				DPRINTF(sc, WM_DEBUG_LINK,
   4027 				    ("%s: %s: Set linkdown discard flag\n",
   4028 					device_xname(sc->sc_dev), __func__));
   4029 				wm_set_linkdown_discard(sc);
   4030 			}
   4031 		}
   4032 		break;
   4033 	case SIOCINITIFADDR:
   4034 		mutex_enter(sc->sc_core_lock);
   4035 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4036 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4037 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4038 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4039 			/* Unicast address is the first multicast entry */
   4040 			wm_set_filter(sc);
   4041 			error = 0;
   4042 			mutex_exit(sc->sc_core_lock);
   4043 			break;
   4044 		}
   4045 		mutex_exit(sc->sc_core_lock);
   4046 		/*FALLTHROUGH*/
   4047 	default:
   4048 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4049 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4050 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4051 				DPRINTF(sc, WM_DEBUG_LINK,
   4052 				    ("%s: %s: Set linkdown discard flag\n",
   4053 					device_xname(sc->sc_dev), __func__));
   4054 				wm_set_linkdown_discard(sc);
   4055 			}
   4056 		}
   4057 		const int s = splnet();
   4058 		/* It may call wm_start, so unlock here */
   4059 		error = ether_ioctl(ifp, cmd, data);
   4060 		splx(s);
   4061 		if (error != ENETRESET)
   4062 			break;
   4063 
   4064 		error = 0;
   4065 
   4066 		if (cmd == SIOCSIFCAP)
   4067 			error = if_init(ifp);
   4068 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4069 			mutex_enter(sc->sc_core_lock);
   4070 			if (sc->sc_if_flags & IFF_RUNNING) {
   4071 				/*
   4072 				 * Multicast list has changed; set the
   4073 				 * hardware filter accordingly.
   4074 				 */
   4075 				wm_set_filter(sc);
   4076 			}
   4077 			mutex_exit(sc->sc_core_lock);
   4078 		}
   4079 		break;
   4080 	}
   4081 
   4082 	return error;
   4083 }
   4084 
   4085 /* MAC address related */
   4086 
   4087 /*
   4088  * Get the offset of MAC address and return it.
   4089  * If error occured, use offset 0.
   4090  */
   4091 static uint16_t
   4092 wm_check_alt_mac_addr(struct wm_softc *sc)
   4093 {
   4094 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4095 	uint16_t offset = NVM_OFF_MACADDR;
   4096 
   4097 	/* Try to read alternative MAC address pointer */
   4098 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4099 		return 0;
   4100 
   4101 	/* Check pointer if it's valid or not. */
   4102 	if ((offset == 0x0000) || (offset == 0xffff))
   4103 		return 0;
   4104 
   4105 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4106 	/*
   4107 	 * Check whether alternative MAC address is valid or not.
   4108 	 * Some cards have non 0xffff pointer but those don't use
   4109 	 * alternative MAC address in reality.
   4110 	 *
   4111 	 * Check whether the broadcast bit is set or not.
   4112 	 */
   4113 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4114 		if (((myea[0] & 0xff) & 0x01) == 0)
   4115 			return offset; /* Found */
   4116 
   4117 	/* Not found */
   4118 	return 0;
   4119 }
   4120 
   4121 static int
   4122 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4123 {
   4124 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4125 	uint16_t offset = NVM_OFF_MACADDR;
   4126 	int do_invert = 0;
   4127 
   4128 	switch (sc->sc_type) {
   4129 	case WM_T_82580:
   4130 	case WM_T_I350:
   4131 	case WM_T_I354:
   4132 		/* EEPROM Top Level Partitioning */
   4133 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4134 		break;
   4135 	case WM_T_82571:
   4136 	case WM_T_82575:
   4137 	case WM_T_82576:
   4138 	case WM_T_80003:
   4139 	case WM_T_I210:
   4140 	case WM_T_I211:
   4141 		offset = wm_check_alt_mac_addr(sc);
   4142 		if (offset == 0)
   4143 			if ((sc->sc_funcid & 0x01) == 1)
   4144 				do_invert = 1;
   4145 		break;
   4146 	default:
   4147 		if ((sc->sc_funcid & 0x01) == 1)
   4148 			do_invert = 1;
   4149 		break;
   4150 	}
   4151 
   4152 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4153 		goto bad;
   4154 
   4155 	enaddr[0] = myea[0] & 0xff;
   4156 	enaddr[1] = myea[0] >> 8;
   4157 	enaddr[2] = myea[1] & 0xff;
   4158 	enaddr[3] = myea[1] >> 8;
   4159 	enaddr[4] = myea[2] & 0xff;
   4160 	enaddr[5] = myea[2] >> 8;
   4161 
   4162 	/*
   4163 	 * Toggle the LSB of the MAC address on the second port
   4164 	 * of some dual port cards.
   4165 	 */
   4166 	if (do_invert != 0)
   4167 		enaddr[5] ^= 1;
   4168 
   4169 	return 0;
   4170 
   4171 bad:
   4172 	return -1;
   4173 }
   4174 
   4175 /*
   4176  * wm_set_ral:
   4177  *
   4178  *	Set an entery in the receive address list.
   4179  */
   4180 static void
   4181 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4182 {
   4183 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4184 	uint32_t wlock_mac;
   4185 	int rv;
   4186 
   4187 	if (enaddr != NULL) {
   4188 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4189 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4190 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4191 		ral_hi |= RAL_AV;
   4192 	} else {
   4193 		ral_lo = 0;
   4194 		ral_hi = 0;
   4195 	}
   4196 
   4197 	switch (sc->sc_type) {
   4198 	case WM_T_82542_2_0:
   4199 	case WM_T_82542_2_1:
   4200 	case WM_T_82543:
   4201 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4202 		CSR_WRITE_FLUSH(sc);
   4203 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4204 		CSR_WRITE_FLUSH(sc);
   4205 		break;
   4206 	case WM_T_PCH2:
   4207 	case WM_T_PCH_LPT:
   4208 	case WM_T_PCH_SPT:
   4209 	case WM_T_PCH_CNP:
   4210 		if (idx == 0) {
   4211 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4212 			CSR_WRITE_FLUSH(sc);
   4213 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4214 			CSR_WRITE_FLUSH(sc);
   4215 			return;
   4216 		}
   4217 		if (sc->sc_type != WM_T_PCH2) {
   4218 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4219 			    FWSM_WLOCK_MAC);
   4220 			addrl = WMREG_SHRAL(idx - 1);
   4221 			addrh = WMREG_SHRAH(idx - 1);
   4222 		} else {
   4223 			wlock_mac = 0;
   4224 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4225 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4226 		}
   4227 
   4228 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4229 			rv = wm_get_swflag_ich8lan(sc);
   4230 			if (rv != 0)
   4231 				return;
   4232 			CSR_WRITE(sc, addrl, ral_lo);
   4233 			CSR_WRITE_FLUSH(sc);
   4234 			CSR_WRITE(sc, addrh, ral_hi);
   4235 			CSR_WRITE_FLUSH(sc);
   4236 			wm_put_swflag_ich8lan(sc);
   4237 		}
   4238 
   4239 		break;
   4240 	default:
   4241 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4242 		CSR_WRITE_FLUSH(sc);
   4243 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4244 		CSR_WRITE_FLUSH(sc);
   4245 		break;
   4246 	}
   4247 }
   4248 
   4249 /*
   4250  * wm_mchash:
   4251  *
   4252  *	Compute the hash of the multicast address for the 4096-bit
   4253  *	multicast filter.
   4254  */
   4255 static uint32_t
   4256 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4257 {
   4258 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4259 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4260 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4261 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4262 	uint32_t hash;
   4263 
   4264 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4265 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4266 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4267 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4268 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4269 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4270 		return (hash & 0x3ff);
   4271 	}
   4272 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4273 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4274 
   4275 	return (hash & 0xfff);
   4276 }
   4277 
   4278 /*
   4279  *
   4280  *
   4281  */
   4282 static int
   4283 wm_rar_count(struct wm_softc *sc)
   4284 {
   4285 	int size;
   4286 
   4287 	switch (sc->sc_type) {
   4288 	case WM_T_ICH8:
   4289 		size = WM_RAL_TABSIZE_ICH8 -1;
   4290 		break;
   4291 	case WM_T_ICH9:
   4292 	case WM_T_ICH10:
   4293 	case WM_T_PCH:
   4294 		size = WM_RAL_TABSIZE_ICH8;
   4295 		break;
   4296 	case WM_T_PCH2:
   4297 		size = WM_RAL_TABSIZE_PCH2;
   4298 		break;
   4299 	case WM_T_PCH_LPT:
   4300 	case WM_T_PCH_SPT:
   4301 	case WM_T_PCH_CNP:
   4302 		size = WM_RAL_TABSIZE_PCH_LPT;
   4303 		break;
   4304 	case WM_T_82575:
   4305 	case WM_T_I210:
   4306 	case WM_T_I211:
   4307 		size = WM_RAL_TABSIZE_82575;
   4308 		break;
   4309 	case WM_T_82576:
   4310 	case WM_T_82580:
   4311 		size = WM_RAL_TABSIZE_82576;
   4312 		break;
   4313 	case WM_T_I350:
   4314 	case WM_T_I354:
   4315 		size = WM_RAL_TABSIZE_I350;
   4316 		break;
   4317 	default:
   4318 		size = WM_RAL_TABSIZE;
   4319 	}
   4320 
   4321 	return size;
   4322 }
   4323 
   4324 /*
   4325  * wm_set_filter:
   4326  *
   4327  *	Set up the receive filter.
   4328  */
   4329 static void
   4330 wm_set_filter(struct wm_softc *sc)
   4331 {
   4332 	struct ethercom *ec = &sc->sc_ethercom;
   4333 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4334 	struct ether_multi *enm;
   4335 	struct ether_multistep step;
   4336 	bus_addr_t mta_reg;
   4337 	uint32_t hash, reg, bit;
   4338 	int i, size, ralmax, rv;
   4339 
   4340 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4341 		device_xname(sc->sc_dev), __func__));
   4342 	KASSERT(mutex_owned(sc->sc_core_lock));
   4343 
   4344 	if (sc->sc_type >= WM_T_82544)
   4345 		mta_reg = WMREG_CORDOVA_MTA;
   4346 	else
   4347 		mta_reg = WMREG_MTA;
   4348 
   4349 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4350 
   4351 	if (sc->sc_if_flags & IFF_BROADCAST)
   4352 		sc->sc_rctl |= RCTL_BAM;
   4353 	if (sc->sc_if_flags & IFF_PROMISC) {
   4354 		sc->sc_rctl |= RCTL_UPE;
   4355 		ETHER_LOCK(ec);
   4356 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4357 		ETHER_UNLOCK(ec);
   4358 		goto allmulti;
   4359 	}
   4360 
   4361 	/*
   4362 	 * Set the station address in the first RAL slot, and
   4363 	 * clear the remaining slots.
   4364 	 */
   4365 	size = wm_rar_count(sc);
   4366 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4367 
   4368 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4369 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4370 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4371 		switch (i) {
   4372 		case 0:
   4373 			/* We can use all entries */
   4374 			ralmax = size;
   4375 			break;
   4376 		case 1:
   4377 			/* Only RAR[0] */
   4378 			ralmax = 1;
   4379 			break;
   4380 		default:
   4381 			/* Available SHRA + RAR[0] */
   4382 			ralmax = i + 1;
   4383 		}
   4384 	} else
   4385 		ralmax = size;
   4386 	for (i = 1; i < size; i++) {
   4387 		if (i < ralmax)
   4388 			wm_set_ral(sc, NULL, i);
   4389 	}
   4390 
   4391 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4392 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4393 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4394 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4395 		size = WM_ICH8_MC_TABSIZE;
   4396 	else
   4397 		size = WM_MC_TABSIZE;
   4398 	/* Clear out the multicast table. */
   4399 	for (i = 0; i < size; i++) {
   4400 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4401 		CSR_WRITE_FLUSH(sc);
   4402 	}
   4403 
   4404 	ETHER_LOCK(ec);
   4405 	ETHER_FIRST_MULTI(step, ec, enm);
   4406 	while (enm != NULL) {
   4407 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4408 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4409 			ETHER_UNLOCK(ec);
   4410 			/*
   4411 			 * We must listen to a range of multicast addresses.
   4412 			 * For now, just accept all multicasts, rather than
   4413 			 * trying to set only those filter bits needed to match
   4414 			 * the range.  (At this time, the only use of address
   4415 			 * ranges is for IP multicast routing, for which the
   4416 			 * range is big enough to require all bits set.)
   4417 			 */
   4418 			goto allmulti;
   4419 		}
   4420 
   4421 		hash = wm_mchash(sc, enm->enm_addrlo);
   4422 
   4423 		reg = (hash >> 5);
   4424 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4425 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4426 		    || (sc->sc_type == WM_T_PCH2)
   4427 		    || (sc->sc_type == WM_T_PCH_LPT)
   4428 		    || (sc->sc_type == WM_T_PCH_SPT)
   4429 		    || (sc->sc_type == WM_T_PCH_CNP))
   4430 			reg &= 0x1f;
   4431 		else
   4432 			reg &= 0x7f;
   4433 		bit = hash & 0x1f;
   4434 
   4435 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4436 		hash |= 1U << bit;
   4437 
   4438 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4439 			/*
   4440 			 * 82544 Errata 9: Certain register cannot be written
   4441 			 * with particular alignments in PCI-X bus operation
   4442 			 * (FCAH, MTA and VFTA).
   4443 			 */
   4444 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4445 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4446 			CSR_WRITE_FLUSH(sc);
   4447 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4448 			CSR_WRITE_FLUSH(sc);
   4449 		} else {
   4450 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4451 			CSR_WRITE_FLUSH(sc);
   4452 		}
   4453 
   4454 		ETHER_NEXT_MULTI(step, enm);
   4455 	}
   4456 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4457 	ETHER_UNLOCK(ec);
   4458 
   4459 	goto setit;
   4460 
   4461 allmulti:
   4462 	sc->sc_rctl |= RCTL_MPE;
   4463 
   4464 setit:
   4465 	if (sc->sc_type >= WM_T_PCH2) {
   4466 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4467 		    && (ifp->if_mtu > ETHERMTU))
   4468 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4469 		else
   4470 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4471 		if (rv != 0)
   4472 			device_printf(sc->sc_dev,
   4473 			    "Failed to do workaround for jumbo frame.\n");
   4474 	}
   4475 
   4476 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4477 }
   4478 
   4479 /* Reset and init related */
   4480 
   4481 static void
   4482 wm_set_vlan(struct wm_softc *sc)
   4483 {
   4484 
   4485 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4486 		device_xname(sc->sc_dev), __func__));
   4487 
   4488 	/* Deal with VLAN enables. */
   4489 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4490 		sc->sc_ctrl |= CTRL_VME;
   4491 	else
   4492 		sc->sc_ctrl &= ~CTRL_VME;
   4493 
   4494 	/* Write the control registers. */
   4495 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4496 }
   4497 
   4498 static void
   4499 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4500 {
   4501 	uint32_t gcr;
   4502 	pcireg_t ctrl2;
   4503 
   4504 	gcr = CSR_READ(sc, WMREG_GCR);
   4505 
   4506 	/* Only take action if timeout value is defaulted to 0 */
   4507 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4508 		goto out;
   4509 
   4510 	if ((gcr & GCR_CAP_VER2) == 0) {
   4511 		gcr |= GCR_CMPL_TMOUT_10MS;
   4512 		goto out;
   4513 	}
   4514 
   4515 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4516 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4517 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4518 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4519 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4520 
   4521 out:
   4522 	/* Disable completion timeout resend */
   4523 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4524 
   4525 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4526 }
   4527 
   4528 void
   4529 wm_get_auto_rd_done(struct wm_softc *sc)
   4530 {
   4531 	int i;
   4532 
   4533 	/* wait for eeprom to reload */
   4534 	switch (sc->sc_type) {
   4535 	case WM_T_82571:
   4536 	case WM_T_82572:
   4537 	case WM_T_82573:
   4538 	case WM_T_82574:
   4539 	case WM_T_82583:
   4540 	case WM_T_82575:
   4541 	case WM_T_82576:
   4542 	case WM_T_82580:
   4543 	case WM_T_I350:
   4544 	case WM_T_I354:
   4545 	case WM_T_I210:
   4546 	case WM_T_I211:
   4547 	case WM_T_80003:
   4548 	case WM_T_ICH8:
   4549 	case WM_T_ICH9:
   4550 		for (i = 0; i < 10; i++) {
   4551 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4552 				break;
   4553 			delay(1000);
   4554 		}
   4555 		if (i == 10) {
   4556 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4557 			    "complete\n", device_xname(sc->sc_dev));
   4558 		}
   4559 		break;
   4560 	default:
   4561 		break;
   4562 	}
   4563 }
   4564 
   4565 void
   4566 wm_lan_init_done(struct wm_softc *sc)
   4567 {
   4568 	uint32_t reg = 0;
   4569 	int i;
   4570 
   4571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4572 		device_xname(sc->sc_dev), __func__));
   4573 
   4574 	/* Wait for eeprom to reload */
   4575 	switch (sc->sc_type) {
   4576 	case WM_T_ICH10:
   4577 	case WM_T_PCH:
   4578 	case WM_T_PCH2:
   4579 	case WM_T_PCH_LPT:
   4580 	case WM_T_PCH_SPT:
   4581 	case WM_T_PCH_CNP:
   4582 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4583 			reg = CSR_READ(sc, WMREG_STATUS);
   4584 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4585 				break;
   4586 			delay(100);
   4587 		}
   4588 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4589 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4590 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4591 		}
   4592 		break;
   4593 	default:
   4594 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4595 		    __func__);
   4596 		break;
   4597 	}
   4598 
   4599 	reg &= ~STATUS_LAN_INIT_DONE;
   4600 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4601 }
   4602 
   4603 void
   4604 wm_get_cfg_done(struct wm_softc *sc)
   4605 {
   4606 	int mask;
   4607 	uint32_t reg;
   4608 	int i;
   4609 
   4610 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4611 		device_xname(sc->sc_dev), __func__));
   4612 
   4613 	/* Wait for eeprom to reload */
   4614 	switch (sc->sc_type) {
   4615 	case WM_T_82542_2_0:
   4616 	case WM_T_82542_2_1:
   4617 		/* null */
   4618 		break;
   4619 	case WM_T_82543:
   4620 	case WM_T_82544:
   4621 	case WM_T_82540:
   4622 	case WM_T_82545:
   4623 	case WM_T_82545_3:
   4624 	case WM_T_82546:
   4625 	case WM_T_82546_3:
   4626 	case WM_T_82541:
   4627 	case WM_T_82541_2:
   4628 	case WM_T_82547:
   4629 	case WM_T_82547_2:
   4630 	case WM_T_82573:
   4631 	case WM_T_82574:
   4632 	case WM_T_82583:
   4633 		/* generic */
   4634 		delay(10*1000);
   4635 		break;
   4636 	case WM_T_80003:
   4637 	case WM_T_82571:
   4638 	case WM_T_82572:
   4639 	case WM_T_82575:
   4640 	case WM_T_82576:
   4641 	case WM_T_82580:
   4642 	case WM_T_I350:
   4643 	case WM_T_I354:
   4644 	case WM_T_I210:
   4645 	case WM_T_I211:
   4646 		if (sc->sc_type == WM_T_82571) {
   4647 			/* Only 82571 shares port 0 */
   4648 			mask = EEMNGCTL_CFGDONE_0;
   4649 		} else
   4650 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4651 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4652 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4653 				break;
   4654 			delay(1000);
   4655 		}
   4656 		if (i >= WM_PHY_CFG_TIMEOUT)
   4657 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4658 				device_xname(sc->sc_dev), __func__));
   4659 		break;
   4660 	case WM_T_ICH8:
   4661 	case WM_T_ICH9:
   4662 	case WM_T_ICH10:
   4663 	case WM_T_PCH:
   4664 	case WM_T_PCH2:
   4665 	case WM_T_PCH_LPT:
   4666 	case WM_T_PCH_SPT:
   4667 	case WM_T_PCH_CNP:
   4668 		delay(10*1000);
   4669 		if (sc->sc_type >= WM_T_ICH10)
   4670 			wm_lan_init_done(sc);
   4671 		else
   4672 			wm_get_auto_rd_done(sc);
   4673 
   4674 		/* Clear PHY Reset Asserted bit */
   4675 		reg = CSR_READ(sc, WMREG_STATUS);
   4676 		if ((reg & STATUS_PHYRA) != 0)
   4677 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4678 		break;
   4679 	default:
   4680 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4681 		    __func__);
   4682 		break;
   4683 	}
   4684 }
   4685 
   4686 int
   4687 wm_phy_post_reset(struct wm_softc *sc)
   4688 {
   4689 	device_t dev = sc->sc_dev;
   4690 	uint16_t reg;
   4691 	int rv = 0;
   4692 
   4693 	/* This function is only for ICH8 and newer. */
   4694 	if (sc->sc_type < WM_T_ICH8)
   4695 		return 0;
   4696 
   4697 	if (wm_phy_resetisblocked(sc)) {
   4698 		/* XXX */
   4699 		device_printf(dev, "PHY is blocked\n");
   4700 		return -1;
   4701 	}
   4702 
   4703 	/* Allow time for h/w to get to quiescent state after reset */
   4704 	delay(10*1000);
   4705 
   4706 	/* Perform any necessary post-reset workarounds */
   4707 	if (sc->sc_type == WM_T_PCH)
   4708 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4709 	else if (sc->sc_type == WM_T_PCH2)
   4710 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4711 	if (rv != 0)
   4712 		return rv;
   4713 
   4714 	/* Clear the host wakeup bit after lcd reset */
   4715 	if (sc->sc_type >= WM_T_PCH) {
   4716 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4717 		reg &= ~BM_WUC_HOST_WU_BIT;
   4718 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4719 	}
   4720 
   4721 	/* Configure the LCD with the extended configuration region in NVM */
   4722 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4723 		return rv;
   4724 
   4725 	/* Configure the LCD with the OEM bits in NVM */
   4726 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4727 
   4728 	if (sc->sc_type == WM_T_PCH2) {
   4729 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4730 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4731 			delay(10 * 1000);
   4732 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4733 		}
   4734 		/* Set EEE LPI Update Timer to 200usec */
   4735 		rv = sc->phy.acquire(sc);
   4736 		if (rv)
   4737 			return rv;
   4738 		rv = wm_write_emi_reg_locked(dev,
   4739 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4740 		sc->phy.release(sc);
   4741 	}
   4742 
   4743 	return rv;
   4744 }
   4745 
   4746 /* Only for PCH and newer */
   4747 static int
   4748 wm_write_smbus_addr(struct wm_softc *sc)
   4749 {
   4750 	uint32_t strap, freq;
   4751 	uint16_t phy_data;
   4752 	int rv;
   4753 
   4754 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4755 		device_xname(sc->sc_dev), __func__));
   4756 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4757 
   4758 	strap = CSR_READ(sc, WMREG_STRAP);
   4759 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4760 
   4761 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4762 	if (rv != 0)
   4763 		return rv;
   4764 
   4765 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4766 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4767 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4768 
   4769 	if (sc->sc_phytype == WMPHY_I217) {
   4770 		/* Restore SMBus frequency */
   4771 		if (freq --) {
   4772 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4773 			    | HV_SMB_ADDR_FREQ_HIGH);
   4774 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4775 			    HV_SMB_ADDR_FREQ_LOW);
   4776 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4777 			    HV_SMB_ADDR_FREQ_HIGH);
   4778 		} else
   4779 			DPRINTF(sc, WM_DEBUG_INIT,
   4780 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4781 				device_xname(sc->sc_dev), __func__));
   4782 	}
   4783 
   4784 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4785 	    phy_data);
   4786 }
   4787 
   4788 static int
   4789 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4790 {
   4791 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4792 	uint16_t phy_page = 0;
   4793 	int rv = 0;
   4794 
   4795 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4796 		device_xname(sc->sc_dev), __func__));
   4797 
   4798 	switch (sc->sc_type) {
   4799 	case WM_T_ICH8:
   4800 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4801 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4802 			return 0;
   4803 
   4804 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4805 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4806 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4807 			break;
   4808 		}
   4809 		/* FALLTHROUGH */
   4810 	case WM_T_PCH:
   4811 	case WM_T_PCH2:
   4812 	case WM_T_PCH_LPT:
   4813 	case WM_T_PCH_SPT:
   4814 	case WM_T_PCH_CNP:
   4815 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4816 		break;
   4817 	default:
   4818 		return 0;
   4819 	}
   4820 
   4821 	if ((rv = sc->phy.acquire(sc)) != 0)
   4822 		return rv;
   4823 
   4824 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4825 	if ((reg & sw_cfg_mask) == 0)
   4826 		goto release;
   4827 
   4828 	/*
   4829 	 * Make sure HW does not configure LCD from PHY extended configuration
   4830 	 * before SW configuration
   4831 	 */
   4832 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4833 	if ((sc->sc_type < WM_T_PCH2)
   4834 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4835 		goto release;
   4836 
   4837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4838 		device_xname(sc->sc_dev), __func__));
   4839 	/* word_addr is in DWORD */
   4840 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4841 
   4842 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4843 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4844 	if (cnf_size == 0)
   4845 		goto release;
   4846 
   4847 	if (((sc->sc_type == WM_T_PCH)
   4848 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4849 	    || (sc->sc_type > WM_T_PCH)) {
   4850 		/*
   4851 		 * HW configures the SMBus address and LEDs when the OEM and
   4852 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4853 		 * are cleared, SW will configure them instead.
   4854 		 */
   4855 		DPRINTF(sc, WM_DEBUG_INIT,
   4856 		    ("%s: %s: Configure SMBus and LED\n",
   4857 			device_xname(sc->sc_dev), __func__));
   4858 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4859 			goto release;
   4860 
   4861 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4862 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4863 		    (uint16_t)reg);
   4864 		if (rv != 0)
   4865 			goto release;
   4866 	}
   4867 
   4868 	/* Configure LCD from extended configuration region. */
   4869 	for (i = 0; i < cnf_size; i++) {
   4870 		uint16_t reg_data, reg_addr;
   4871 
   4872 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4873 			goto release;
   4874 
   4875 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4876 			goto release;
   4877 
   4878 		if (reg_addr == IGPHY_PAGE_SELECT)
   4879 			phy_page = reg_data;
   4880 
   4881 		reg_addr &= IGPHY_MAXREGADDR;
   4882 		reg_addr |= phy_page;
   4883 
   4884 		KASSERT(sc->phy.writereg_locked != NULL);
   4885 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4886 		    reg_data);
   4887 	}
   4888 
   4889 release:
   4890 	sc->phy.release(sc);
   4891 	return rv;
   4892 }
   4893 
   4894 /*
   4895  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4896  *  @sc:       pointer to the HW structure
   4897  *  @d0_state: boolean if entering d0 or d3 device state
   4898  *
   4899  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4900  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4901  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4902  */
   4903 int
   4904 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4905 {
   4906 	uint32_t mac_reg;
   4907 	uint16_t oem_reg;
   4908 	int rv;
   4909 
   4910 	if (sc->sc_type < WM_T_PCH)
   4911 		return 0;
   4912 
   4913 	rv = sc->phy.acquire(sc);
   4914 	if (rv != 0)
   4915 		return rv;
   4916 
   4917 	if (sc->sc_type == WM_T_PCH) {
   4918 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4919 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4920 			goto release;
   4921 	}
   4922 
   4923 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4924 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4925 		goto release;
   4926 
   4927 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4928 
   4929 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4930 	if (rv != 0)
   4931 		goto release;
   4932 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4933 
   4934 	if (d0_state) {
   4935 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4936 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4937 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4938 			oem_reg |= HV_OEM_BITS_LPLU;
   4939 	} else {
   4940 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4941 		    != 0)
   4942 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4943 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4944 		    != 0)
   4945 			oem_reg |= HV_OEM_BITS_LPLU;
   4946 	}
   4947 
   4948 	/* Set Restart auto-neg to activate the bits */
   4949 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4950 	    && (wm_phy_resetisblocked(sc) == false))
   4951 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4952 
   4953 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4954 
   4955 release:
   4956 	sc->phy.release(sc);
   4957 
   4958 	return rv;
   4959 }
   4960 
   4961 /* Init hardware bits */
   4962 void
   4963 wm_initialize_hardware_bits(struct wm_softc *sc)
   4964 {
   4965 	uint32_t tarc0, tarc1, reg;
   4966 
   4967 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4968 		device_xname(sc->sc_dev), __func__));
   4969 
   4970 	/* For 82571 variant, 80003 and ICHs */
   4971 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4972 	    || (sc->sc_type >= WM_T_80003)) {
   4973 
   4974 		/* Transmit Descriptor Control 0 */
   4975 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4976 		reg |= TXDCTL_COUNT_DESC;
   4977 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4978 
   4979 		/* Transmit Descriptor Control 1 */
   4980 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4981 		reg |= TXDCTL_COUNT_DESC;
   4982 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4983 
   4984 		/* TARC0 */
   4985 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4986 		switch (sc->sc_type) {
   4987 		case WM_T_82571:
   4988 		case WM_T_82572:
   4989 		case WM_T_82573:
   4990 		case WM_T_82574:
   4991 		case WM_T_82583:
   4992 		case WM_T_80003:
   4993 			/* Clear bits 30..27 */
   4994 			tarc0 &= ~__BITS(30, 27);
   4995 			break;
   4996 		default:
   4997 			break;
   4998 		}
   4999 
   5000 		switch (sc->sc_type) {
   5001 		case WM_T_82571:
   5002 		case WM_T_82572:
   5003 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5004 
   5005 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5006 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5007 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5008 			/* 8257[12] Errata No.7 */
   5009 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   5010 
   5011 			/* TARC1 bit 28 */
   5012 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5013 				tarc1 &= ~__BIT(28);
   5014 			else
   5015 				tarc1 |= __BIT(28);
   5016 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5017 
   5018 			/*
   5019 			 * 8257[12] Errata No.13
   5020 			 * Disable Dyamic Clock Gating.
   5021 			 */
   5022 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5023 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5024 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5025 			break;
   5026 		case WM_T_82573:
   5027 		case WM_T_82574:
   5028 		case WM_T_82583:
   5029 			if ((sc->sc_type == WM_T_82574)
   5030 			    || (sc->sc_type == WM_T_82583))
   5031 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5032 
   5033 			/* Extended Device Control */
   5034 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5035 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5036 			reg |= __BIT(22);	/* Set bit 22 */
   5037 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5038 
   5039 			/* Device Control */
   5040 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5041 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5042 
   5043 			/* PCIe Control Register */
   5044 			/*
   5045 			 * 82573 Errata (unknown).
   5046 			 *
   5047 			 * 82574 Errata 25 and 82583 Errata 12
   5048 			 * "Dropped Rx Packets":
   5049 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   5050 			 */
   5051 			reg = CSR_READ(sc, WMREG_GCR);
   5052 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5053 			CSR_WRITE(sc, WMREG_GCR, reg);
   5054 
   5055 			if ((sc->sc_type == WM_T_82574)
   5056 			    || (sc->sc_type == WM_T_82583)) {
   5057 				/*
   5058 				 * Document says this bit must be set for
   5059 				 * proper operation.
   5060 				 */
   5061 				reg = CSR_READ(sc, WMREG_GCR);
   5062 				reg |= __BIT(22);
   5063 				CSR_WRITE(sc, WMREG_GCR, reg);
   5064 
   5065 				/*
   5066 				 * Apply workaround for hardware errata
   5067 				 * documented in errata docs Fixes issue where
   5068 				 * some error prone or unreliable PCIe
   5069 				 * completions are occurring, particularly
   5070 				 * with ASPM enabled. Without fix, issue can
   5071 				 * cause Tx timeouts.
   5072 				 */
   5073 				reg = CSR_READ(sc, WMREG_GCR2);
   5074 				reg |= __BIT(0);
   5075 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5076 			}
   5077 			break;
   5078 		case WM_T_80003:
   5079 			/* TARC0 */
   5080 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5081 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   5082 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   5083 
   5084 			/* TARC1 bit 28 */
   5085 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5086 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5087 				tarc1 &= ~__BIT(28);
   5088 			else
   5089 				tarc1 |= __BIT(28);
   5090 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5091 			break;
   5092 		case WM_T_ICH8:
   5093 		case WM_T_ICH9:
   5094 		case WM_T_ICH10:
   5095 		case WM_T_PCH:
   5096 		case WM_T_PCH2:
   5097 		case WM_T_PCH_LPT:
   5098 		case WM_T_PCH_SPT:
   5099 		case WM_T_PCH_CNP:
   5100 			/* TARC0 */
   5101 			if (sc->sc_type == WM_T_ICH8) {
   5102 				/* Set TARC0 bits 29 and 28 */
   5103 				tarc0 |= __BITS(29, 28);
   5104 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5105 				tarc0 |= __BIT(29);
   5106 				/*
   5107 				 *  Drop bit 28. From Linux.
   5108 				 * See I218/I219 spec update
   5109 				 * "5. Buffer Overrun While the I219 is
   5110 				 * Processing DMA Transactions"
   5111 				 */
   5112 				tarc0 &= ~__BIT(28);
   5113 			}
   5114 			/* Set TARC0 bits 23,24,26,27 */
   5115 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5116 
   5117 			/* CTRL_EXT */
   5118 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5119 			reg |= __BIT(22);	/* Set bit 22 */
   5120 			/*
   5121 			 * Enable PHY low-power state when MAC is at D3
   5122 			 * w/o WoL
   5123 			 */
   5124 			if (sc->sc_type >= WM_T_PCH)
   5125 				reg |= CTRL_EXT_PHYPDEN;
   5126 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5127 
   5128 			/* TARC1 */
   5129 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5130 			/* bit 28 */
   5131 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5132 				tarc1 &= ~__BIT(28);
   5133 			else
   5134 				tarc1 |= __BIT(28);
   5135 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5136 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5137 
   5138 			/* Device Status */
   5139 			if (sc->sc_type == WM_T_ICH8) {
   5140 				reg = CSR_READ(sc, WMREG_STATUS);
   5141 				reg &= ~__BIT(31);
   5142 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5143 
   5144 			}
   5145 
   5146 			/* IOSFPC */
   5147 			if (sc->sc_type == WM_T_PCH_SPT) {
   5148 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5149 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5150 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5151 			}
   5152 			/*
   5153 			 * Work-around descriptor data corruption issue during
   5154 			 * NFS v2 UDP traffic, just disable the NFS filtering
   5155 			 * capability.
   5156 			 */
   5157 			reg = CSR_READ(sc, WMREG_RFCTL);
   5158 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5159 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5160 			break;
   5161 		default:
   5162 			break;
   5163 		}
   5164 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5165 
   5166 		switch (sc->sc_type) {
   5167 		case WM_T_82571:
   5168 		case WM_T_82572:
   5169 		case WM_T_82573:
   5170 		case WM_T_80003:
   5171 		case WM_T_ICH8:
   5172 			/*
   5173 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5174 			 * others to avoid RSS Hash Value bug.
   5175 			 */
   5176 			reg = CSR_READ(sc, WMREG_RFCTL);
   5177 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5178 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5179 			break;
   5180 		case WM_T_82574:
   5181 			/* Use extened Rx descriptor. */
   5182 			reg = CSR_READ(sc, WMREG_RFCTL);
   5183 			reg |= WMREG_RFCTL_EXSTEN;
   5184 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5185 			break;
   5186 		default:
   5187 			break;
   5188 		}
   5189 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5190 		/*
   5191 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5192 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5193 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5194 		 * Correctly by the Device"
   5195 		 *
   5196 		 * I354(C2000) Errata AVR53:
   5197 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5198 		 * Hang"
   5199 		 */
   5200 		reg = CSR_READ(sc, WMREG_RFCTL);
   5201 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5202 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5203 	}
   5204 }
   5205 
   5206 static uint32_t
   5207 wm_rxpbs_adjust_82580(uint32_t val)
   5208 {
   5209 	uint32_t rv = 0;
   5210 
   5211 	if (val < __arraycount(wm_82580_rxpbs_table))
   5212 		rv = wm_82580_rxpbs_table[val];
   5213 
   5214 	return rv;
   5215 }
   5216 
   5217 /*
   5218  * wm_reset_phy:
   5219  *
   5220  *	generic PHY reset function.
   5221  *	Same as e1000_phy_hw_reset_generic()
   5222  */
   5223 static int
   5224 wm_reset_phy(struct wm_softc *sc)
   5225 {
   5226 	uint32_t reg;
   5227 	int rv;
   5228 
   5229 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5230 		device_xname(sc->sc_dev), __func__));
   5231 	if (wm_phy_resetisblocked(sc))
   5232 		return -1;
   5233 
   5234 	rv = sc->phy.acquire(sc);
   5235 	if (rv) {
   5236 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5237 		    __func__, rv);
   5238 		return rv;
   5239 	}
   5240 
   5241 	reg = CSR_READ(sc, WMREG_CTRL);
   5242 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5243 	CSR_WRITE_FLUSH(sc);
   5244 
   5245 	delay(sc->phy.reset_delay_us);
   5246 
   5247 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5248 	CSR_WRITE_FLUSH(sc);
   5249 
   5250 	delay(150);
   5251 
   5252 	sc->phy.release(sc);
   5253 
   5254 	wm_get_cfg_done(sc);
   5255 	wm_phy_post_reset(sc);
   5256 
   5257 	return 0;
   5258 }
   5259 
   5260 /*
   5261  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5262  *
   5263  * In i219, the descriptor rings must be emptied before resetting the HW
   5264  * or before changing the device state to D3 during runtime (runtime PM).
   5265  *
   5266  * Failure to do this will cause the HW to enter a unit hang state which can
   5267  * only be released by PCI reset on the device.
   5268  *
   5269  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5270  */
   5271 static void
   5272 wm_flush_desc_rings(struct wm_softc *sc)
   5273 {
   5274 	pcireg_t preg;
   5275 	uint32_t reg;
   5276 	struct wm_txqueue *txq;
   5277 	wiseman_txdesc_t *txd;
   5278 	int nexttx;
   5279 	uint32_t rctl;
   5280 
   5281 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5282 
   5283 	/* First, disable MULR fix in FEXTNVM11 */
   5284 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5285 	reg |= FEXTNVM11_DIS_MULRFIX;
   5286 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5287 
   5288 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5289 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5290 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5291 		return;
   5292 
   5293 	/*
   5294 	 * Remove all descriptors from the tx_ring.
   5295 	 *
   5296 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5297 	 * happens when the HW reads the regs. We assign the ring itself as
   5298 	 * the data of the next descriptor. We don't care about the data we are
   5299 	 * about to reset the HW.
   5300 	 */
   5301 #ifdef WM_DEBUG
   5302 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5303 #endif
   5304 	reg = CSR_READ(sc, WMREG_TCTL);
   5305 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5306 
   5307 	txq = &sc->sc_queue[0].wmq_txq;
   5308 	nexttx = txq->txq_next;
   5309 	txd = &txq->txq_descs[nexttx];
   5310 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5311 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5312 	txd->wtx_fields.wtxu_status = 0;
   5313 	txd->wtx_fields.wtxu_options = 0;
   5314 	txd->wtx_fields.wtxu_vlan = 0;
   5315 
   5316 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5317 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5318 
   5319 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5320 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5321 	CSR_WRITE_FLUSH(sc);
   5322 	delay(250);
   5323 
   5324 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5325 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5326 		return;
   5327 
   5328 	/*
   5329 	 * Mark all descriptors in the RX ring as consumed and disable the
   5330 	 * rx ring.
   5331 	 */
   5332 #ifdef WM_DEBUG
   5333 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5334 #endif
   5335 	rctl = CSR_READ(sc, WMREG_RCTL);
   5336 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5337 	CSR_WRITE_FLUSH(sc);
   5338 	delay(150);
   5339 
   5340 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5341 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5342 	reg &= 0xffffc000;
   5343 	/*
   5344 	 * Update thresholds: prefetch threshold to 31, host threshold
   5345 	 * to 1 and make sure the granularity is "descriptors" and not
   5346 	 * "cache lines"
   5347 	 */
   5348 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5349 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5350 
   5351 	/* Momentarily enable the RX ring for the changes to take effect */
   5352 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5353 	CSR_WRITE_FLUSH(sc);
   5354 	delay(150);
   5355 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5356 }
   5357 
   5358 /*
   5359  * wm_reset:
   5360  *
   5361  *	Reset the i82542 chip.
   5362  */
   5363 static void
   5364 wm_reset(struct wm_softc *sc)
   5365 {
   5366 	int phy_reset = 0;
   5367 	int i, error = 0;
   5368 	uint32_t reg;
   5369 	uint16_t kmreg;
   5370 	int rv;
   5371 
   5372 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5373 		device_xname(sc->sc_dev), __func__));
   5374 	KASSERT(sc->sc_type != 0);
   5375 
   5376 	/*
   5377 	 * Allocate on-chip memory according to the MTU size.
   5378 	 * The Packet Buffer Allocation register must be written
   5379 	 * before the chip is reset.
   5380 	 */
   5381 	switch (sc->sc_type) {
   5382 	case WM_T_82547:
   5383 	case WM_T_82547_2:
   5384 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5385 		    PBA_22K : PBA_30K;
   5386 		for (i = 0; i < sc->sc_nqueues; i++) {
   5387 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5388 			txq->txq_fifo_head = 0;
   5389 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5390 			txq->txq_fifo_size =
   5391 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5392 			txq->txq_fifo_stall = 0;
   5393 		}
   5394 		break;
   5395 	case WM_T_82571:
   5396 	case WM_T_82572:
   5397 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   5398 	case WM_T_80003:
   5399 		sc->sc_pba = PBA_32K;
   5400 		break;
   5401 	case WM_T_82573:
   5402 		sc->sc_pba = PBA_12K;
   5403 		break;
   5404 	case WM_T_82574:
   5405 	case WM_T_82583:
   5406 		sc->sc_pba = PBA_20K;
   5407 		break;
   5408 	case WM_T_82576:
   5409 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5410 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5411 		break;
   5412 	case WM_T_82580:
   5413 	case WM_T_I350:
   5414 	case WM_T_I354:
   5415 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5416 		break;
   5417 	case WM_T_I210:
   5418 	case WM_T_I211:
   5419 		sc->sc_pba = PBA_34K;
   5420 		break;
   5421 	case WM_T_ICH8:
   5422 		/* Workaround for a bit corruption issue in FIFO memory */
   5423 		sc->sc_pba = PBA_8K;
   5424 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5425 		break;
   5426 	case WM_T_ICH9:
   5427 	case WM_T_ICH10:
   5428 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5429 		    PBA_14K : PBA_10K;
   5430 		break;
   5431 	case WM_T_PCH:
   5432 	case WM_T_PCH2:	/* XXX 14K? */
   5433 	case WM_T_PCH_LPT:
   5434 	case WM_T_PCH_SPT:
   5435 	case WM_T_PCH_CNP:
   5436 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5437 		    PBA_12K : PBA_26K;
   5438 		break;
   5439 	default:
   5440 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5441 		    PBA_40K : PBA_48K;
   5442 		break;
   5443 	}
   5444 	/*
   5445 	 * Only old or non-multiqueue devices have the PBA register
   5446 	 * XXX Need special handling for 82575.
   5447 	 */
   5448 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5449 	    || (sc->sc_type == WM_T_82575))
   5450 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5451 
   5452 	/* Prevent the PCI-E bus from sticking */
   5453 	if (sc->sc_flags & WM_F_PCIE) {
   5454 		int timeout = 800;
   5455 
   5456 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5457 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5458 
   5459 		while (timeout--) {
   5460 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5461 			    == 0)
   5462 				break;
   5463 			delay(100);
   5464 		}
   5465 		if (timeout == 0)
   5466 			device_printf(sc->sc_dev,
   5467 			    "failed to disable bus mastering\n");
   5468 	}
   5469 
   5470 	/* Set the completion timeout for interface */
   5471 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5472 	    || (sc->sc_type == WM_T_82580)
   5473 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5474 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5475 		wm_set_pcie_completion_timeout(sc);
   5476 
   5477 	/* Clear interrupt */
   5478 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5479 	if (wm_is_using_msix(sc)) {
   5480 		if (sc->sc_type != WM_T_82574) {
   5481 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5482 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5483 		} else
   5484 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5485 	}
   5486 
   5487 	/* Stop the transmit and receive processes. */
   5488 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5489 	sc->sc_rctl &= ~RCTL_EN;
   5490 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5491 	CSR_WRITE_FLUSH(sc);
   5492 
   5493 	/* XXX set_tbi_sbp_82543() */
   5494 
   5495 	delay(10*1000);
   5496 
   5497 	/* Must acquire the MDIO ownership before MAC reset */
   5498 	switch (sc->sc_type) {
   5499 	case WM_T_82573:
   5500 	case WM_T_82574:
   5501 	case WM_T_82583:
   5502 		error = wm_get_hw_semaphore_82573(sc);
   5503 		break;
   5504 	default:
   5505 		break;
   5506 	}
   5507 
   5508 	/*
   5509 	 * 82541 Errata 29? & 82547 Errata 28?
   5510 	 * See also the description about PHY_RST bit in CTRL register
   5511 	 * in 8254x_GBe_SDM.pdf.
   5512 	 */
   5513 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5514 		CSR_WRITE(sc, WMREG_CTRL,
   5515 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5516 		CSR_WRITE_FLUSH(sc);
   5517 		delay(5000);
   5518 	}
   5519 
   5520 	switch (sc->sc_type) {
   5521 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5522 	case WM_T_82541:
   5523 	case WM_T_82541_2:
   5524 	case WM_T_82547:
   5525 	case WM_T_82547_2:
   5526 		/*
   5527 		 * On some chipsets, a reset through a memory-mapped write
   5528 		 * cycle can cause the chip to reset before completing the
   5529 		 * write cycle. This causes major headache that can be avoided
   5530 		 * by issuing the reset via indirect register writes through
   5531 		 * I/O space.
   5532 		 *
   5533 		 * So, if we successfully mapped the I/O BAR at attach time,
   5534 		 * use that. Otherwise, try our luck with a memory-mapped
   5535 		 * reset.
   5536 		 */
   5537 		if (sc->sc_flags & WM_F_IOH_VALID)
   5538 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5539 		else
   5540 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5541 		break;
   5542 	case WM_T_82545_3:
   5543 	case WM_T_82546_3:
   5544 		/* Use the shadow control register on these chips. */
   5545 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5546 		break;
   5547 	case WM_T_80003:
   5548 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5549 		if (sc->phy.acquire(sc) != 0)
   5550 			break;
   5551 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5552 		sc->phy.release(sc);
   5553 		break;
   5554 	case WM_T_ICH8:
   5555 	case WM_T_ICH9:
   5556 	case WM_T_ICH10:
   5557 	case WM_T_PCH:
   5558 	case WM_T_PCH2:
   5559 	case WM_T_PCH_LPT:
   5560 	case WM_T_PCH_SPT:
   5561 	case WM_T_PCH_CNP:
   5562 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5563 		if (wm_phy_resetisblocked(sc) == false) {
   5564 			/*
   5565 			 * Gate automatic PHY configuration by hardware on
   5566 			 * non-managed 82579
   5567 			 */
   5568 			if ((sc->sc_type == WM_T_PCH2)
   5569 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5570 				== 0))
   5571 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5572 
   5573 			reg |= CTRL_PHY_RESET;
   5574 			phy_reset = 1;
   5575 		} else
   5576 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5577 		if (sc->phy.acquire(sc) != 0)
   5578 			break;
   5579 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5580 		/* Don't insert a completion barrier when reset */
   5581 		delay(20*1000);
   5582 		mutex_exit(sc->sc_ich_phymtx);
   5583 		break;
   5584 	case WM_T_82580:
   5585 	case WM_T_I350:
   5586 	case WM_T_I354:
   5587 	case WM_T_I210:
   5588 	case WM_T_I211:
   5589 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5590 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5591 			CSR_WRITE_FLUSH(sc);
   5592 		delay(5000);
   5593 		break;
   5594 	case WM_T_82542_2_0:
   5595 	case WM_T_82542_2_1:
   5596 	case WM_T_82543:
   5597 	case WM_T_82540:
   5598 	case WM_T_82545:
   5599 	case WM_T_82546:
   5600 	case WM_T_82571:
   5601 	case WM_T_82572:
   5602 	case WM_T_82573:
   5603 	case WM_T_82574:
   5604 	case WM_T_82575:
   5605 	case WM_T_82576:
   5606 	case WM_T_82583:
   5607 	default:
   5608 		/* Everything else can safely use the documented method. */
   5609 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5610 		break;
   5611 	}
   5612 
   5613 	/* Must release the MDIO ownership after MAC reset */
   5614 	switch (sc->sc_type) {
   5615 	case WM_T_82573:
   5616 	case WM_T_82574:
   5617 	case WM_T_82583:
   5618 		if (error == 0)
   5619 			wm_put_hw_semaphore_82573(sc);
   5620 		break;
   5621 	default:
   5622 		break;
   5623 	}
   5624 
   5625 	/* Set Phy Config Counter to 50msec */
   5626 	if (sc->sc_type == WM_T_PCH2) {
   5627 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5628 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5629 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5630 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5631 	}
   5632 
   5633 	if (phy_reset != 0)
   5634 		wm_get_cfg_done(sc);
   5635 
   5636 	/* Reload EEPROM */
   5637 	switch (sc->sc_type) {
   5638 	case WM_T_82542_2_0:
   5639 	case WM_T_82542_2_1:
   5640 	case WM_T_82543:
   5641 	case WM_T_82544:
   5642 		delay(10);
   5643 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5644 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5645 		CSR_WRITE_FLUSH(sc);
   5646 		delay(2000);
   5647 		break;
   5648 	case WM_T_82540:
   5649 	case WM_T_82545:
   5650 	case WM_T_82545_3:
   5651 	case WM_T_82546:
   5652 	case WM_T_82546_3:
   5653 		delay(5*1000);
   5654 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5655 		break;
   5656 	case WM_T_82541:
   5657 	case WM_T_82541_2:
   5658 	case WM_T_82547:
   5659 	case WM_T_82547_2:
   5660 		delay(20000);
   5661 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5662 		break;
   5663 	case WM_T_82571:
   5664 	case WM_T_82572:
   5665 	case WM_T_82573:
   5666 	case WM_T_82574:
   5667 	case WM_T_82583:
   5668 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5669 			delay(10);
   5670 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5672 			CSR_WRITE_FLUSH(sc);
   5673 		}
   5674 		/* check EECD_EE_AUTORD */
   5675 		wm_get_auto_rd_done(sc);
   5676 		/*
   5677 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5678 		 * is set.
   5679 		 */
   5680 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5681 		    || (sc->sc_type == WM_T_82583))
   5682 			delay(25*1000);
   5683 		break;
   5684 	case WM_T_82575:
   5685 	case WM_T_82576:
   5686 	case WM_T_82580:
   5687 	case WM_T_I350:
   5688 	case WM_T_I354:
   5689 	case WM_T_I210:
   5690 	case WM_T_I211:
   5691 	case WM_T_80003:
   5692 		/* check EECD_EE_AUTORD */
   5693 		wm_get_auto_rd_done(sc);
   5694 		break;
   5695 	case WM_T_ICH8:
   5696 	case WM_T_ICH9:
   5697 	case WM_T_ICH10:
   5698 	case WM_T_PCH:
   5699 	case WM_T_PCH2:
   5700 	case WM_T_PCH_LPT:
   5701 	case WM_T_PCH_SPT:
   5702 	case WM_T_PCH_CNP:
   5703 		break;
   5704 	default:
   5705 		panic("%s: unknown type\n", __func__);
   5706 	}
   5707 
   5708 	/* Check whether EEPROM is present or not */
   5709 	switch (sc->sc_type) {
   5710 	case WM_T_82575:
   5711 	case WM_T_82576:
   5712 	case WM_T_82580:
   5713 	case WM_T_I350:
   5714 	case WM_T_I354:
   5715 	case WM_T_ICH8:
   5716 	case WM_T_ICH9:
   5717 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5718 			/* Not found */
   5719 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5720 			if (sc->sc_type == WM_T_82575)
   5721 				wm_reset_init_script_82575(sc);
   5722 		}
   5723 		break;
   5724 	default:
   5725 		break;
   5726 	}
   5727 
   5728 	if (phy_reset != 0)
   5729 		wm_phy_post_reset(sc);
   5730 
   5731 	if ((sc->sc_type == WM_T_82580)
   5732 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5733 		/* Clear global device reset status bit */
   5734 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5735 	}
   5736 
   5737 	/* Clear any pending interrupt events. */
   5738 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5739 	reg = CSR_READ(sc, WMREG_ICR);
   5740 	if (wm_is_using_msix(sc)) {
   5741 		if (sc->sc_type != WM_T_82574) {
   5742 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5743 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5744 		} else
   5745 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5746 	}
   5747 
   5748 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5749 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5750 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5751 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5752 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5753 		reg |= KABGTXD_BGSQLBIAS;
   5754 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5755 	}
   5756 
   5757 	/* Reload sc_ctrl */
   5758 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5759 
   5760 	wm_set_eee(sc);
   5761 
   5762 	/*
   5763 	 * For PCH, this write will make sure that any noise will be detected
   5764 	 * as a CRC error and be dropped rather than show up as a bad packet
   5765 	 * to the DMA engine
   5766 	 */
   5767 	if (sc->sc_type == WM_T_PCH)
   5768 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5769 
   5770 	if (sc->sc_type >= WM_T_82544)
   5771 		CSR_WRITE(sc, WMREG_WUC, 0);
   5772 
   5773 	if (sc->sc_type < WM_T_82575)
   5774 		wm_disable_aspm(sc); /* Workaround for some chips */
   5775 
   5776 	wm_reset_mdicnfg_82580(sc);
   5777 
   5778 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5779 		wm_pll_workaround_i210(sc);
   5780 
   5781 	if (sc->sc_type == WM_T_80003) {
   5782 		/* Default to TRUE to enable the MDIC W/A */
   5783 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5784 
   5785 		rv = wm_kmrn_readreg(sc,
   5786 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5787 		if (rv == 0) {
   5788 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5789 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5790 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5791 			else
   5792 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5793 		}
   5794 	}
   5795 }
   5796 
   5797 /*
   5798  * wm_add_rxbuf:
   5799  *
   5800  *	Add a receive buffer to the indiciated descriptor.
   5801  */
   5802 static int
   5803 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5804 {
   5805 	struct wm_softc *sc = rxq->rxq_sc;
   5806 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5807 	struct mbuf *m;
   5808 	int error;
   5809 
   5810 	KASSERT(mutex_owned(rxq->rxq_lock));
   5811 
   5812 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5813 	if (m == NULL)
   5814 		return ENOBUFS;
   5815 
   5816 	MCLGET(m, M_DONTWAIT);
   5817 	if ((m->m_flags & M_EXT) == 0) {
   5818 		m_freem(m);
   5819 		return ENOBUFS;
   5820 	}
   5821 
   5822 	if (rxs->rxs_mbuf != NULL)
   5823 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5824 
   5825 	rxs->rxs_mbuf = m;
   5826 
   5827 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5828 	/*
   5829 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5830 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5831 	 */
   5832 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5833 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5834 	if (error) {
   5835 		/* XXX XXX XXX */
   5836 		aprint_error_dev(sc->sc_dev,
   5837 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5838 		panic("wm_add_rxbuf");
   5839 	}
   5840 
   5841 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5842 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5843 
   5844 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5845 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5846 			wm_init_rxdesc(rxq, idx);
   5847 	} else
   5848 		wm_init_rxdesc(rxq, idx);
   5849 
   5850 	return 0;
   5851 }
   5852 
   5853 /*
   5854  * wm_rxdrain:
   5855  *
   5856  *	Drain the receive queue.
   5857  */
   5858 static void
   5859 wm_rxdrain(struct wm_rxqueue *rxq)
   5860 {
   5861 	struct wm_softc *sc = rxq->rxq_sc;
   5862 	struct wm_rxsoft *rxs;
   5863 	int i;
   5864 
   5865 	KASSERT(mutex_owned(rxq->rxq_lock));
   5866 
   5867 	for (i = 0; i < WM_NRXDESC; i++) {
   5868 		rxs = &rxq->rxq_soft[i];
   5869 		if (rxs->rxs_mbuf != NULL) {
   5870 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5871 			m_freem(rxs->rxs_mbuf);
   5872 			rxs->rxs_mbuf = NULL;
   5873 		}
   5874 	}
   5875 }
   5876 
   5877 /*
   5878  * Setup registers for RSS.
   5879  *
   5880  * XXX not yet VMDq support
   5881  */
   5882 static void
   5883 wm_init_rss(struct wm_softc *sc)
   5884 {
   5885 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5886 	int i;
   5887 
   5888 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5889 
   5890 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5891 		unsigned int qid, reta_ent;
   5892 
   5893 		qid  = i % sc->sc_nqueues;
   5894 		switch (sc->sc_type) {
   5895 		case WM_T_82574:
   5896 			reta_ent = __SHIFTIN(qid,
   5897 			    RETA_ENT_QINDEX_MASK_82574);
   5898 			break;
   5899 		case WM_T_82575:
   5900 			reta_ent = __SHIFTIN(qid,
   5901 			    RETA_ENT_QINDEX1_MASK_82575);
   5902 			break;
   5903 		default:
   5904 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5905 			break;
   5906 		}
   5907 
   5908 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5909 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5910 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5911 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5912 	}
   5913 
   5914 	rss_getkey((uint8_t *)rss_key);
   5915 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5916 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5917 
   5918 	if (sc->sc_type == WM_T_82574)
   5919 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5920 	else
   5921 		mrqc = MRQC_ENABLE_RSS_MQ;
   5922 
   5923 	/*
   5924 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5925 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5926 	 */
   5927 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5928 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5929 #if 0
   5930 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5931 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5932 #endif
   5933 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5934 
   5935 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5936 }
   5937 
   5938 /*
   5939  * Adjust TX and RX queue numbers which the system actulally uses.
   5940  *
   5941  * The numbers are affected by below parameters.
   5942  *     - The nubmer of hardware queues
   5943  *     - The number of MSI-X vectors (= "nvectors" argument)
   5944  *     - ncpu
   5945  */
   5946 static void
   5947 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5948 {
   5949 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5950 
   5951 	if (nvectors < 2) {
   5952 		sc->sc_nqueues = 1;
   5953 		return;
   5954 	}
   5955 
   5956 	switch (sc->sc_type) {
   5957 	case WM_T_82572:
   5958 		hw_ntxqueues = 2;
   5959 		hw_nrxqueues = 2;
   5960 		break;
   5961 	case WM_T_82574:
   5962 		hw_ntxqueues = 2;
   5963 		hw_nrxqueues = 2;
   5964 		break;
   5965 	case WM_T_82575:
   5966 		hw_ntxqueues = 4;
   5967 		hw_nrxqueues = 4;
   5968 		break;
   5969 	case WM_T_82576:
   5970 		hw_ntxqueues = 16;
   5971 		hw_nrxqueues = 16;
   5972 		break;
   5973 	case WM_T_82580:
   5974 	case WM_T_I350:
   5975 	case WM_T_I354:
   5976 		hw_ntxqueues = 8;
   5977 		hw_nrxqueues = 8;
   5978 		break;
   5979 	case WM_T_I210:
   5980 		hw_ntxqueues = 4;
   5981 		hw_nrxqueues = 4;
   5982 		break;
   5983 	case WM_T_I211:
   5984 		hw_ntxqueues = 2;
   5985 		hw_nrxqueues = 2;
   5986 		break;
   5987 		/*
   5988 		 * The below Ethernet controllers do not support MSI-X;
   5989 		 * this driver doesn't let them use multiqueue.
   5990 		 *     - WM_T_80003
   5991 		 *     - WM_T_ICH8
   5992 		 *     - WM_T_ICH9
   5993 		 *     - WM_T_ICH10
   5994 		 *     - WM_T_PCH
   5995 		 *     - WM_T_PCH2
   5996 		 *     - WM_T_PCH_LPT
   5997 		 */
   5998 	default:
   5999 		hw_ntxqueues = 1;
   6000 		hw_nrxqueues = 1;
   6001 		break;
   6002 	}
   6003 
   6004 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6005 
   6006 	/*
   6007 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   6008 	 * the number of queues used actually.
   6009 	 */
   6010 	if (nvectors < hw_nqueues + 1)
   6011 		sc->sc_nqueues = nvectors - 1;
   6012 	else
   6013 		sc->sc_nqueues = hw_nqueues;
   6014 
   6015 	/*
   6016 	 * As queues more than CPUs cannot improve scaling, we limit
   6017 	 * the number of queues used actually.
   6018 	 */
   6019 	if (ncpu < sc->sc_nqueues)
   6020 		sc->sc_nqueues = ncpu;
   6021 }
   6022 
   6023 static inline bool
   6024 wm_is_using_msix(struct wm_softc *sc)
   6025 {
   6026 
   6027 	return (sc->sc_nintrs > 1);
   6028 }
   6029 
   6030 static inline bool
   6031 wm_is_using_multiqueue(struct wm_softc *sc)
   6032 {
   6033 
   6034 	return (sc->sc_nqueues > 1);
   6035 }
   6036 
   6037 static int
   6038 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6039 {
   6040 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6041 
   6042 	wmq->wmq_id = qidx;
   6043 	wmq->wmq_intr_idx = intr_idx;
   6044 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6045 	    wm_handle_queue, wmq);
   6046 	if (wmq->wmq_si != NULL)
   6047 		return 0;
   6048 
   6049 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6050 	    wmq->wmq_id);
   6051 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6052 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6053 	return ENOMEM;
   6054 }
   6055 
   6056 /*
   6057  * Both single interrupt MSI and INTx can use this function.
   6058  */
   6059 static int
   6060 wm_setup_legacy(struct wm_softc *sc)
   6061 {
   6062 	pci_chipset_tag_t pc = sc->sc_pc;
   6063 	const char *intrstr = NULL;
   6064 	char intrbuf[PCI_INTRSTR_LEN];
   6065 	int error;
   6066 
   6067 	error = wm_alloc_txrx_queues(sc);
   6068 	if (error) {
   6069 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6070 		    error);
   6071 		return ENOMEM;
   6072 	}
   6073 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6074 	    sizeof(intrbuf));
   6075 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6076 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6077 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6078 	if (sc->sc_ihs[0] == NULL) {
   6079 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6080 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6081 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6082 		return ENOMEM;
   6083 	}
   6084 
   6085 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6086 	sc->sc_nintrs = 1;
   6087 
   6088 	return wm_softint_establish_queue(sc, 0, 0);
   6089 }
   6090 
   6091 static int
   6092 wm_setup_msix(struct wm_softc *sc)
   6093 {
   6094 	void *vih;
   6095 	kcpuset_t *affinity;
   6096 	int qidx, error, intr_idx, txrx_established;
   6097 	pci_chipset_tag_t pc = sc->sc_pc;
   6098 	const char *intrstr = NULL;
   6099 	char intrbuf[PCI_INTRSTR_LEN];
   6100 	char intr_xname[INTRDEVNAMEBUF];
   6101 
   6102 	if (sc->sc_nqueues < ncpu) {
   6103 		/*
   6104 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   6105 		 * interrupts start from CPU#1.
   6106 		 */
   6107 		sc->sc_affinity_offset = 1;
   6108 	} else {
   6109 		/*
   6110 		 * In this case, this device use all CPUs. So, we unify
   6111 		 * affinitied cpu_index to msix vector number for readability.
   6112 		 */
   6113 		sc->sc_affinity_offset = 0;
   6114 	}
   6115 
   6116 	error = wm_alloc_txrx_queues(sc);
   6117 	if (error) {
   6118 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6119 		    error);
   6120 		return ENOMEM;
   6121 	}
   6122 
   6123 	kcpuset_create(&affinity, false);
   6124 	intr_idx = 0;
   6125 
   6126 	/*
   6127 	 * TX and RX
   6128 	 */
   6129 	txrx_established = 0;
   6130 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6131 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6132 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6133 
   6134 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6135 		    sizeof(intrbuf));
   6136 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6137 		    PCI_INTR_MPSAFE, true);
   6138 		memset(intr_xname, 0, sizeof(intr_xname));
   6139 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6140 		    device_xname(sc->sc_dev), qidx);
   6141 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6142 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6143 		if (vih == NULL) {
   6144 			aprint_error_dev(sc->sc_dev,
   6145 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6146 			    intrstr ? " at " : "",
   6147 			    intrstr ? intrstr : "");
   6148 
   6149 			goto fail;
   6150 		}
   6151 		kcpuset_zero(affinity);
   6152 		/* Round-robin affinity */
   6153 		kcpuset_set(affinity, affinity_to);
   6154 		error = interrupt_distribute(vih, affinity, NULL);
   6155 		if (error == 0) {
   6156 			aprint_normal_dev(sc->sc_dev,
   6157 			    "for TX and RX interrupting at %s affinity to %u\n",
   6158 			    intrstr, affinity_to);
   6159 		} else {
   6160 			aprint_normal_dev(sc->sc_dev,
   6161 			    "for TX and RX interrupting at %s\n", intrstr);
   6162 		}
   6163 		sc->sc_ihs[intr_idx] = vih;
   6164 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6165 			goto fail;
   6166 		txrx_established++;
   6167 		intr_idx++;
   6168 	}
   6169 
   6170 	/* LINK */
   6171 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6172 	    sizeof(intrbuf));
   6173 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6174 	memset(intr_xname, 0, sizeof(intr_xname));
   6175 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6176 	    device_xname(sc->sc_dev));
   6177 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6178 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6179 	if (vih == NULL) {
   6180 		aprint_error_dev(sc->sc_dev,
   6181 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6182 		    intrstr ? " at " : "",
   6183 		    intrstr ? intrstr : "");
   6184 
   6185 		goto fail;
   6186 	}
   6187 	/* Keep default affinity to LINK interrupt */
   6188 	aprint_normal_dev(sc->sc_dev,
   6189 	    "for LINK interrupting at %s\n", intrstr);
   6190 	sc->sc_ihs[intr_idx] = vih;
   6191 	sc->sc_link_intr_idx = intr_idx;
   6192 
   6193 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6194 	kcpuset_destroy(affinity);
   6195 	return 0;
   6196 
   6197 fail:
   6198 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6199 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6200 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6201 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6202 	}
   6203 
   6204 	kcpuset_destroy(affinity);
   6205 	return ENOMEM;
   6206 }
   6207 
   6208 static void
   6209 wm_unset_stopping_flags(struct wm_softc *sc)
   6210 {
   6211 	int i;
   6212 
   6213 	KASSERT(mutex_owned(sc->sc_core_lock));
   6214 
   6215 	/* Must unset stopping flags in ascending order. */
   6216 	for (i = 0; i < sc->sc_nqueues; i++) {
   6217 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6218 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6219 
   6220 		mutex_enter(txq->txq_lock);
   6221 		txq->txq_stopping = false;
   6222 		mutex_exit(txq->txq_lock);
   6223 
   6224 		mutex_enter(rxq->rxq_lock);
   6225 		rxq->rxq_stopping = false;
   6226 		mutex_exit(rxq->rxq_lock);
   6227 	}
   6228 
   6229 	sc->sc_core_stopping = false;
   6230 }
   6231 
   6232 static void
   6233 wm_set_stopping_flags(struct wm_softc *sc)
   6234 {
   6235 	int i;
   6236 
   6237 	KASSERT(mutex_owned(sc->sc_core_lock));
   6238 
   6239 	sc->sc_core_stopping = true;
   6240 
   6241 	/* Must set stopping flags in ascending order. */
   6242 	for (i = 0; i < sc->sc_nqueues; i++) {
   6243 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6244 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6245 
   6246 		mutex_enter(rxq->rxq_lock);
   6247 		rxq->rxq_stopping = true;
   6248 		mutex_exit(rxq->rxq_lock);
   6249 
   6250 		mutex_enter(txq->txq_lock);
   6251 		txq->txq_stopping = true;
   6252 		mutex_exit(txq->txq_lock);
   6253 	}
   6254 }
   6255 
   6256 /*
   6257  * Write interrupt interval value to ITR or EITR
   6258  */
   6259 static void
   6260 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6261 {
   6262 
   6263 	if (!wmq->wmq_set_itr)
   6264 		return;
   6265 
   6266 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6267 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6268 
   6269 		/*
   6270 		 * 82575 doesn't have CNT_INGR field.
   6271 		 * So, overwrite counter field by software.
   6272 		 */
   6273 		if (sc->sc_type == WM_T_82575)
   6274 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6275 			    EITR_COUNTER_MASK_82575);
   6276 		else
   6277 			eitr |= EITR_CNT_INGR;
   6278 
   6279 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6280 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6281 		/*
   6282 		 * 82574 has both ITR and EITR. SET EITR when we use
   6283 		 * the multi queue function with MSI-X.
   6284 		 */
   6285 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6286 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6287 	} else {
   6288 		KASSERT(wmq->wmq_id == 0);
   6289 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6290 	}
   6291 
   6292 	wmq->wmq_set_itr = false;
   6293 }
   6294 
   6295 /*
   6296  * TODO
   6297  * Below dynamic calculation of itr is almost the same as Linux igb,
   6298  * however it does not fit to wm(4). So, we will have been disable AIM
   6299  * until we will find appropriate calculation of itr.
   6300  */
   6301 /*
   6302  * Calculate interrupt interval value to be going to write register in
   6303  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   6304  */
   6305 static void
   6306 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6307 {
   6308 #ifdef NOTYET
   6309 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6310 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6311 	uint32_t avg_size = 0;
   6312 	uint32_t new_itr;
   6313 
   6314 	if (rxq->rxq_packets)
   6315 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6316 	if (txq->txq_packets)
   6317 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6318 
   6319 	if (avg_size == 0) {
   6320 		new_itr = 450; /* restore default value */
   6321 		goto out;
   6322 	}
   6323 
   6324 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6325 	avg_size += 24;
   6326 
   6327 	/* Don't starve jumbo frames */
   6328 	avg_size = uimin(avg_size, 3000);
   6329 
   6330 	/* Give a little boost to mid-size frames */
   6331 	if ((avg_size > 300) && (avg_size < 1200))
   6332 		new_itr = avg_size / 3;
   6333 	else
   6334 		new_itr = avg_size / 2;
   6335 
   6336 out:
   6337 	/*
   6338 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   6339 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6340 	 */
   6341 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6342 		new_itr *= 4;
   6343 
   6344 	if (new_itr != wmq->wmq_itr) {
   6345 		wmq->wmq_itr = new_itr;
   6346 		wmq->wmq_set_itr = true;
   6347 	} else
   6348 		wmq->wmq_set_itr = false;
   6349 
   6350 	rxq->rxq_packets = 0;
   6351 	rxq->rxq_bytes = 0;
   6352 	txq->txq_packets = 0;
   6353 	txq->txq_bytes = 0;
   6354 #endif
   6355 }
   6356 
   6357 static void
   6358 wm_init_sysctls(struct wm_softc *sc)
   6359 {
   6360 	struct sysctllog **log;
   6361 	const struct sysctlnode *rnode, *qnode, *cnode;
   6362 	int i, rv;
   6363 	const char *dvname;
   6364 
   6365 	log = &sc->sc_sysctllog;
   6366 	dvname = device_xname(sc->sc_dev);
   6367 
   6368 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6369 	    0, CTLTYPE_NODE, dvname,
   6370 	    SYSCTL_DESCR("wm information and settings"),
   6371 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6372 	if (rv != 0)
   6373 		goto err;
   6374 
   6375 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6376 	    CTLTYPE_BOOL, "txrx_workqueue",
   6377 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6378 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6379 	if (rv != 0)
   6380 		goto teardown;
   6381 
   6382 	for (i = 0; i < sc->sc_nqueues; i++) {
   6383 		struct wm_queue *wmq = &sc->sc_queue[i];
   6384 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6385 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6386 
   6387 		snprintf(sc->sc_queue[i].sysctlname,
   6388 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6389 
   6390 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6391 		    0, CTLTYPE_NODE,
   6392 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6393 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6394 			break;
   6395 
   6396 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6397 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6398 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6399 		    NULL, 0, &txq->txq_free,
   6400 		    0, CTL_CREATE, CTL_EOL) != 0)
   6401 			break;
   6402 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6403 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6404 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6405 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6406 		    0, CTL_CREATE, CTL_EOL) != 0)
   6407 			break;
   6408 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6409 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6410 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6411 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6412 		    0, CTL_CREATE, CTL_EOL) != 0)
   6413 			break;
   6414 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6415 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6416 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6417 		    NULL, 0, &txq->txq_next,
   6418 		    0, CTL_CREATE, CTL_EOL) != 0)
   6419 			break;
   6420 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6421 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6422 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6423 		    NULL, 0, &txq->txq_sfree,
   6424 		    0, CTL_CREATE, CTL_EOL) != 0)
   6425 			break;
   6426 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6427 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6428 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6429 		    NULL, 0, &txq->txq_snext,
   6430 		    0, CTL_CREATE, CTL_EOL) != 0)
   6431 			break;
   6432 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6433 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6434 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6435 		    NULL, 0, &txq->txq_sdirty,
   6436 		    0, CTL_CREATE, CTL_EOL) != 0)
   6437 			break;
   6438 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6439 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6440 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6441 		    NULL, 0, &txq->txq_flags,
   6442 		    0, CTL_CREATE, CTL_EOL) != 0)
   6443 			break;
   6444 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6445 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6446 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6447 		    NULL, 0, &txq->txq_stopping,
   6448 		    0, CTL_CREATE, CTL_EOL) != 0)
   6449 			break;
   6450 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6451 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6452 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6453 		    NULL, 0, &txq->txq_sending,
   6454 		    0, CTL_CREATE, CTL_EOL) != 0)
   6455 			break;
   6456 
   6457 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6458 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6459 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6460 		    NULL, 0, &rxq->rxq_ptr,
   6461 		    0, CTL_CREATE, CTL_EOL) != 0)
   6462 			break;
   6463 	}
   6464 
   6465 #ifdef WM_DEBUG
   6466 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6467 	    CTLTYPE_INT, "debug_flags",
   6468 	    SYSCTL_DESCR(
   6469 		    "Debug flags:\n"	\
   6470 		    "\t0x01 LINK\n"	\
   6471 		    "\t0x02 TX\n"	\
   6472 		    "\t0x04 RX\n"	\
   6473 		    "\t0x08 GMII\n"	\
   6474 		    "\t0x10 MANAGE\n"	\
   6475 		    "\t0x20 NVM\n"	\
   6476 		    "\t0x40 INIT\n"	\
   6477 		    "\t0x80 LOCK"),
   6478 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6479 	if (rv != 0)
   6480 		goto teardown;
   6481 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6482 	    CTLTYPE_BOOL, "trigger_reset",
   6483 	    SYSCTL_DESCR("Trigger an interface reset"),
   6484 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6485 	if (rv != 0)
   6486 		goto teardown;
   6487 #endif
   6488 
   6489 	return;
   6490 
   6491 teardown:
   6492 	sysctl_teardown(log);
   6493 err:
   6494 	sc->sc_sysctllog = NULL;
   6495 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6496 	    __func__, rv);
   6497 }
   6498 
   6499 /*
   6500  * wm_init:		[ifnet interface function]
   6501  *
   6502  *	Initialize the interface.
   6503  */
   6504 static int
   6505 wm_init(struct ifnet *ifp)
   6506 {
   6507 	struct wm_softc *sc = ifp->if_softc;
   6508 	int ret;
   6509 
   6510 	KASSERT(IFNET_LOCKED(ifp));
   6511 
   6512 	if (sc->sc_dying)
   6513 		return ENXIO;
   6514 
   6515 	mutex_enter(sc->sc_core_lock);
   6516 	ret = wm_init_locked(ifp);
   6517 	mutex_exit(sc->sc_core_lock);
   6518 
   6519 	return ret;
   6520 }
   6521 
   6522 static int
   6523 wm_init_locked(struct ifnet *ifp)
   6524 {
   6525 	struct wm_softc *sc = ifp->if_softc;
   6526 	struct ethercom *ec = &sc->sc_ethercom;
   6527 	int i, j, trynum, error = 0;
   6528 	uint32_t reg, sfp_mask = 0;
   6529 
   6530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6531 		device_xname(sc->sc_dev), __func__));
   6532 	KASSERT(IFNET_LOCKED(ifp));
   6533 	KASSERT(mutex_owned(sc->sc_core_lock));
   6534 
   6535 	/*
   6536 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   6537 	 * There is a small but measurable benefit to avoiding the adjusment
   6538 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6539 	 * on such platforms.  One possibility is that the DMA itself is
   6540 	 * slightly more efficient if the front of the entire packet (instead
   6541 	 * of the front of the headers) is aligned.
   6542 	 *
   6543 	 * Note we must always set align_tweak to 0 if we are using
   6544 	 * jumbo frames.
   6545 	 */
   6546 #ifdef __NO_STRICT_ALIGNMENT
   6547 	sc->sc_align_tweak = 0;
   6548 #else
   6549 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6550 		sc->sc_align_tweak = 0;
   6551 	else
   6552 		sc->sc_align_tweak = 2;
   6553 #endif /* __NO_STRICT_ALIGNMENT */
   6554 
   6555 	/* Cancel any pending I/O. */
   6556 	wm_stop_locked(ifp, false, false);
   6557 
   6558 	/* Update statistics before reset */
   6559 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6560 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6561 
   6562 	/* >= PCH_SPT hardware workaround before reset. */
   6563 	if (sc->sc_type >= WM_T_PCH_SPT)
   6564 		wm_flush_desc_rings(sc);
   6565 
   6566 	/* Reset the chip to a known state. */
   6567 	wm_reset(sc);
   6568 
   6569 	/*
   6570 	 * AMT based hardware can now take control from firmware
   6571 	 * Do this after reset.
   6572 	 */
   6573 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6574 		wm_get_hw_control(sc);
   6575 
   6576 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6577 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6578 		wm_legacy_irq_quirk_spt(sc);
   6579 
   6580 	/* Init hardware bits */
   6581 	wm_initialize_hardware_bits(sc);
   6582 
   6583 	/* Reset the PHY. */
   6584 	if (sc->sc_flags & WM_F_HAS_MII)
   6585 		wm_gmii_reset(sc);
   6586 
   6587 	if (sc->sc_type >= WM_T_ICH8) {
   6588 		reg = CSR_READ(sc, WMREG_GCR);
   6589 		/*
   6590 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6591 		 * default after reset.
   6592 		 */
   6593 		if (sc->sc_type == WM_T_ICH8)
   6594 			reg |= GCR_NO_SNOOP_ALL;
   6595 		else
   6596 			reg &= ~GCR_NO_SNOOP_ALL;
   6597 		CSR_WRITE(sc, WMREG_GCR, reg);
   6598 	}
   6599 
   6600 	if ((sc->sc_type >= WM_T_ICH8)
   6601 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6602 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6603 
   6604 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6605 		reg |= CTRL_EXT_RO_DIS;
   6606 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6607 	}
   6608 
   6609 	/* Calculate (E)ITR value */
   6610 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6611 		/*
   6612 		 * For NEWQUEUE's EITR (except for 82575).
   6613 		 * 82575's EITR should be set same throttling value as other
   6614 		 * old controllers' ITR because the interrupt/sec calculation
   6615 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6616 		 *
   6617 		 * 82574's EITR should be set same throttling value as ITR.
   6618 		 *
   6619 		 * For N interrupts/sec, set this value to:
   6620 		 * 1,000,000 / N in contrast to ITR throttling value.
   6621 		 */
   6622 		sc->sc_itr_init = 450;
   6623 	} else if (sc->sc_type >= WM_T_82543) {
   6624 		/*
   6625 		 * Set up the interrupt throttling register (units of 256ns)
   6626 		 * Note that a footnote in Intel's documentation says this
   6627 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6628 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6629 		 * that that is also true for the 1024ns units of the other
   6630 		 * interrupt-related timer registers -- so, really, we ought
   6631 		 * to divide this value by 4 when the link speed is low.
   6632 		 *
   6633 		 * XXX implement this division at link speed change!
   6634 		 */
   6635 
   6636 		/*
   6637 		 * For N interrupts/sec, set this value to:
   6638 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6639 		 * absolute and packet timer values to this value
   6640 		 * divided by 4 to get "simple timer" behavior.
   6641 		 */
   6642 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6643 	}
   6644 
   6645 	error = wm_init_txrx_queues(sc);
   6646 	if (error)
   6647 		goto out;
   6648 
   6649 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6650 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6651 	    (sc->sc_type >= WM_T_82575))
   6652 		wm_serdes_power_up_link_82575(sc);
   6653 
   6654 	/* Clear out the VLAN table -- we don't use it (yet). */
   6655 	CSR_WRITE(sc, WMREG_VET, 0);
   6656 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6657 		trynum = 10; /* Due to hw errata */
   6658 	else
   6659 		trynum = 1;
   6660 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6661 		for (j = 0; j < trynum; j++)
   6662 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6663 
   6664 	/*
   6665 	 * Set up flow-control parameters.
   6666 	 *
   6667 	 * XXX Values could probably stand some tuning.
   6668 	 */
   6669 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6670 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6671 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6672 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6673 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6674 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6675 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6676 	}
   6677 
   6678 	sc->sc_fcrtl = FCRTL_DFLT;
   6679 	if (sc->sc_type < WM_T_82543) {
   6680 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6681 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6682 	} else {
   6683 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6684 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6685 	}
   6686 
   6687 	if (sc->sc_type == WM_T_80003)
   6688 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6689 	else
   6690 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6691 
   6692 	/* Writes the control register. */
   6693 	wm_set_vlan(sc);
   6694 
   6695 	if (sc->sc_flags & WM_F_HAS_MII) {
   6696 		uint16_t kmreg;
   6697 
   6698 		switch (sc->sc_type) {
   6699 		case WM_T_80003:
   6700 		case WM_T_ICH8:
   6701 		case WM_T_ICH9:
   6702 		case WM_T_ICH10:
   6703 		case WM_T_PCH:
   6704 		case WM_T_PCH2:
   6705 		case WM_T_PCH_LPT:
   6706 		case WM_T_PCH_SPT:
   6707 		case WM_T_PCH_CNP:
   6708 			/*
   6709 			 * Set the mac to wait the maximum time between each
   6710 			 * iteration and increase the max iterations when
   6711 			 * polling the phy; this fixes erroneous timeouts at
   6712 			 * 10Mbps.
   6713 			 */
   6714 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6715 			    0xFFFF);
   6716 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6717 			    &kmreg);
   6718 			kmreg |= 0x3F;
   6719 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6720 			    kmreg);
   6721 			break;
   6722 		default:
   6723 			break;
   6724 		}
   6725 
   6726 		if (sc->sc_type == WM_T_80003) {
   6727 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6728 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6729 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6730 
   6731 			/* Bypass RX and TX FIFOs */
   6732 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6733 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6734 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6735 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6736 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6737 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6738 		}
   6739 	}
   6740 #if 0
   6741 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6742 #endif
   6743 
   6744 	/* Set up checksum offload parameters. */
   6745 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6746 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6747 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6748 		reg |= RXCSUM_IPOFL;
   6749 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6750 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6751 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6752 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6753 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6754 
   6755 	/* Set registers about MSI-X */
   6756 	if (wm_is_using_msix(sc)) {
   6757 		uint32_t ivar, qintr_idx;
   6758 		struct wm_queue *wmq;
   6759 		unsigned int qid;
   6760 
   6761 		if (sc->sc_type == WM_T_82575) {
   6762 			/* Interrupt control */
   6763 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6764 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6765 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6766 
   6767 			/* TX and RX */
   6768 			for (i = 0; i < sc->sc_nqueues; i++) {
   6769 				wmq = &sc->sc_queue[i];
   6770 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6771 				    EITR_TX_QUEUE(wmq->wmq_id)
   6772 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6773 			}
   6774 			/* Link status */
   6775 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6776 			    EITR_OTHER);
   6777 		} else if (sc->sc_type == WM_T_82574) {
   6778 			/* Interrupt control */
   6779 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6780 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6781 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6782 
   6783 			/*
   6784 			 * Work around issue with spurious interrupts
   6785 			 * in MSI-X mode.
   6786 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   6787 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   6788 			 */
   6789 			reg = CSR_READ(sc, WMREG_RFCTL);
   6790 			reg |= WMREG_RFCTL_ACKDIS;
   6791 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6792 
   6793 			ivar = 0;
   6794 			/* TX and RX */
   6795 			for (i = 0; i < sc->sc_nqueues; i++) {
   6796 				wmq = &sc->sc_queue[i];
   6797 				qid = wmq->wmq_id;
   6798 				qintr_idx = wmq->wmq_intr_idx;
   6799 
   6800 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6801 				    IVAR_TX_MASK_Q_82574(qid));
   6802 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6803 				    IVAR_RX_MASK_Q_82574(qid));
   6804 			}
   6805 			/* Link status */
   6806 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6807 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6808 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6809 		} else {
   6810 			/* Interrupt control */
   6811 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6812 			    | GPIE_EIAME | GPIE_PBA);
   6813 
   6814 			switch (sc->sc_type) {
   6815 			case WM_T_82580:
   6816 			case WM_T_I350:
   6817 			case WM_T_I354:
   6818 			case WM_T_I210:
   6819 			case WM_T_I211:
   6820 				/* TX and RX */
   6821 				for (i = 0; i < sc->sc_nqueues; i++) {
   6822 					wmq = &sc->sc_queue[i];
   6823 					qid = wmq->wmq_id;
   6824 					qintr_idx = wmq->wmq_intr_idx;
   6825 
   6826 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6827 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6828 					ivar |= __SHIFTIN((qintr_idx
   6829 						| IVAR_VALID),
   6830 					    IVAR_TX_MASK_Q(qid));
   6831 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6832 					ivar |= __SHIFTIN((qintr_idx
   6833 						| IVAR_VALID),
   6834 					    IVAR_RX_MASK_Q(qid));
   6835 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6836 				}
   6837 				break;
   6838 			case WM_T_82576:
   6839 				/* TX and RX */
   6840 				for (i = 0; i < sc->sc_nqueues; i++) {
   6841 					wmq = &sc->sc_queue[i];
   6842 					qid = wmq->wmq_id;
   6843 					qintr_idx = wmq->wmq_intr_idx;
   6844 
   6845 					ivar = CSR_READ(sc,
   6846 					    WMREG_IVAR_Q_82576(qid));
   6847 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6848 					ivar |= __SHIFTIN((qintr_idx
   6849 						| IVAR_VALID),
   6850 					    IVAR_TX_MASK_Q_82576(qid));
   6851 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6852 					ivar |= __SHIFTIN((qintr_idx
   6853 						| IVAR_VALID),
   6854 					    IVAR_RX_MASK_Q_82576(qid));
   6855 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6856 					    ivar);
   6857 				}
   6858 				break;
   6859 			default:
   6860 				break;
   6861 			}
   6862 
   6863 			/* Link status */
   6864 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6865 			    IVAR_MISC_OTHER);
   6866 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6867 		}
   6868 
   6869 		if (wm_is_using_multiqueue(sc)) {
   6870 			wm_init_rss(sc);
   6871 
   6872 			/*
   6873 			** NOTE: Receive Full-Packet Checksum Offload
   6874 			** is mutually exclusive with Multiqueue. However
   6875 			** this is not the same as TCP/IP checksums which
   6876 			** still work.
   6877 			*/
   6878 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6879 			reg |= RXCSUM_PCSD;
   6880 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6881 		}
   6882 	}
   6883 
   6884 	/* Set up the interrupt registers. */
   6885 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6886 
   6887 	/* Enable SFP module insertion interrupt if it's required */
   6888 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6889 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6890 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6891 		sfp_mask = ICR_GPI(0);
   6892 	}
   6893 
   6894 	if (wm_is_using_msix(sc)) {
   6895 		uint32_t mask;
   6896 		struct wm_queue *wmq;
   6897 
   6898 		switch (sc->sc_type) {
   6899 		case WM_T_82574:
   6900 			mask = 0;
   6901 			for (i = 0; i < sc->sc_nqueues; i++) {
   6902 				wmq = &sc->sc_queue[i];
   6903 				mask |= ICR_TXQ(wmq->wmq_id);
   6904 				mask |= ICR_RXQ(wmq->wmq_id);
   6905 			}
   6906 			mask |= ICR_OTHER;
   6907 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6908 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6909 			break;
   6910 		default:
   6911 			if (sc->sc_type == WM_T_82575) {
   6912 				mask = 0;
   6913 				for (i = 0; i < sc->sc_nqueues; i++) {
   6914 					wmq = &sc->sc_queue[i];
   6915 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6916 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6917 				}
   6918 				mask |= EITR_OTHER;
   6919 			} else {
   6920 				mask = 0;
   6921 				for (i = 0; i < sc->sc_nqueues; i++) {
   6922 					wmq = &sc->sc_queue[i];
   6923 					mask |= 1 << wmq->wmq_intr_idx;
   6924 				}
   6925 				mask |= 1 << sc->sc_link_intr_idx;
   6926 			}
   6927 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6928 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6929 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6930 
   6931 			/* For other interrupts */
   6932 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6933 			break;
   6934 		}
   6935 	} else {
   6936 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6937 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6938 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6939 	}
   6940 
   6941 	/* Set up the inter-packet gap. */
   6942 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6943 
   6944 	if (sc->sc_type >= WM_T_82543) {
   6945 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6946 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6947 			wm_itrs_writereg(sc, wmq);
   6948 		}
   6949 		/*
   6950 		 * Link interrupts occur much less than TX
   6951 		 * interrupts and RX interrupts. So, we don't
   6952 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6953 		 * FreeBSD's if_igb.
   6954 		 */
   6955 	}
   6956 
   6957 	/* Set the VLAN EtherType. */
   6958 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6959 
   6960 	/*
   6961 	 * Set up the transmit control register; we start out with
   6962 	 * a collision distance suitable for FDX, but update it when
   6963 	 * we resolve the media type.
   6964 	 */
   6965 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6966 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6967 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6968 	if (sc->sc_type >= WM_T_82571)
   6969 		sc->sc_tctl |= TCTL_MULR;
   6970 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6971 
   6972 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6973 		/* Write TDT after TCTL.EN is set. See the document. */
   6974 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6975 	}
   6976 
   6977 	if (sc->sc_type == WM_T_80003) {
   6978 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6979 		reg &= ~TCTL_EXT_GCEX_MASK;
   6980 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6981 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6982 	}
   6983 
   6984 	/* Set the media. */
   6985 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6986 		goto out;
   6987 
   6988 	/* Configure for OS presence */
   6989 	wm_init_manageability(sc);
   6990 
   6991 	/*
   6992 	 * Set up the receive control register; we actually program the
   6993 	 * register when we set the receive filter. Use multicast address
   6994 	 * offset type 0.
   6995 	 *
   6996 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6997 	 * don't enable that feature.
   6998 	 */
   6999 	sc->sc_mchash_type = 0;
   7000 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7001 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7002 
   7003 	/* 82574 use one buffer extended Rx descriptor. */
   7004 	if (sc->sc_type == WM_T_82574)
   7005 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7006 
   7007 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7008 		sc->sc_rctl |= RCTL_SECRC;
   7009 
   7010 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7011 	    && (ifp->if_mtu > ETHERMTU)) {
   7012 		sc->sc_rctl |= RCTL_LPE;
   7013 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7014 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7015 	}
   7016 
   7017 	if (MCLBYTES == 2048)
   7018 		sc->sc_rctl |= RCTL_2k;
   7019 	else {
   7020 		if (sc->sc_type >= WM_T_82543) {
   7021 			switch (MCLBYTES) {
   7022 			case 4096:
   7023 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7024 				break;
   7025 			case 8192:
   7026 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7027 				break;
   7028 			case 16384:
   7029 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7030 				break;
   7031 			default:
   7032 				panic("wm_init: MCLBYTES %d unsupported",
   7033 				    MCLBYTES);
   7034 				break;
   7035 			}
   7036 		} else
   7037 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7038 	}
   7039 
   7040 	/* Enable ECC */
   7041 	switch (sc->sc_type) {
   7042 	case WM_T_82571:
   7043 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7044 		reg |= PBA_ECC_CORR_EN;
   7045 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7046 		break;
   7047 	case WM_T_PCH_LPT:
   7048 	case WM_T_PCH_SPT:
   7049 	case WM_T_PCH_CNP:
   7050 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7051 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7052 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7053 
   7054 		sc->sc_ctrl |= CTRL_MEHE;
   7055 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7056 		break;
   7057 	default:
   7058 		break;
   7059 	}
   7060 
   7061 	/*
   7062 	 * Set the receive filter.
   7063 	 *
   7064 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7065 	 * the setting of RCTL.EN in wm_set_filter()
   7066 	 */
   7067 	wm_set_filter(sc);
   7068 
   7069 	/* On 575 and later set RDT only if RX enabled */
   7070 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7071 		int qidx;
   7072 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7073 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7074 			for (i = 0; i < WM_NRXDESC; i++) {
   7075 				mutex_enter(rxq->rxq_lock);
   7076 				wm_init_rxdesc(rxq, i);
   7077 				mutex_exit(rxq->rxq_lock);
   7078 
   7079 			}
   7080 		}
   7081 	}
   7082 
   7083 	wm_unset_stopping_flags(sc);
   7084 
   7085 	/* Start the one second link check clock. */
   7086 	callout_schedule(&sc->sc_tick_ch, hz);
   7087 
   7088 	/*
   7089 	 * ...all done! (IFNET_LOCKED asserted above.)
   7090 	 */
   7091 	ifp->if_flags |= IFF_RUNNING;
   7092 
   7093 out:
   7094 	/* Save last flags for the callback */
   7095 	sc->sc_if_flags = ifp->if_flags;
   7096 	sc->sc_ec_capenable = ec->ec_capenable;
   7097 	if (error)
   7098 		log(LOG_ERR, "%s: interface not running\n",
   7099 		    device_xname(sc->sc_dev));
   7100 	return error;
   7101 }
   7102 
   7103 /*
   7104  * wm_stop:		[ifnet interface function]
   7105  *
   7106  *	Stop transmission on the interface.
   7107  */
   7108 static void
   7109 wm_stop(struct ifnet *ifp, int disable)
   7110 {
   7111 	struct wm_softc *sc = ifp->if_softc;
   7112 
   7113 	ASSERT_SLEEPABLE();
   7114 	KASSERT(IFNET_LOCKED(ifp));
   7115 
   7116 	mutex_enter(sc->sc_core_lock);
   7117 	wm_stop_locked(ifp, disable ? true : false, true);
   7118 	mutex_exit(sc->sc_core_lock);
   7119 
   7120 	/*
   7121 	 * After wm_set_stopping_flags(), it is guaranteed that
   7122 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   7123 	 * However, workqueue_wait() cannot call in wm_stop_locked()
   7124 	 * because it can sleep...
   7125 	 * so, call workqueue_wait() here.
   7126 	 */
   7127 	for (int i = 0; i < sc->sc_nqueues; i++)
   7128 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7129 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7130 }
   7131 
   7132 static void
   7133 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7134 {
   7135 	struct wm_softc *sc = ifp->if_softc;
   7136 	struct wm_txsoft *txs;
   7137 	int i, qidx;
   7138 
   7139 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7140 		device_xname(sc->sc_dev), __func__));
   7141 	KASSERT(IFNET_LOCKED(ifp));
   7142 	KASSERT(mutex_owned(sc->sc_core_lock));
   7143 
   7144 	wm_set_stopping_flags(sc);
   7145 
   7146 	if (sc->sc_flags & WM_F_HAS_MII) {
   7147 		/* Down the MII. */
   7148 		mii_down(&sc->sc_mii);
   7149 	} else {
   7150 #if 0
   7151 		/* Should we clear PHY's status properly? */
   7152 		wm_reset(sc);
   7153 #endif
   7154 	}
   7155 
   7156 	/* Stop the transmit and receive processes. */
   7157 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7158 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7159 	sc->sc_rctl &= ~RCTL_EN;
   7160 
   7161 	/*
   7162 	 * Clear the interrupt mask to ensure the device cannot assert its
   7163 	 * interrupt line.
   7164 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7165 	 * service any currently pending or shared interrupt.
   7166 	 */
   7167 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7168 	sc->sc_icr = 0;
   7169 	if (wm_is_using_msix(sc)) {
   7170 		if (sc->sc_type != WM_T_82574) {
   7171 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7172 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7173 		} else
   7174 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7175 	}
   7176 
   7177 	/*
   7178 	 * Stop callouts after interrupts are disabled; if we have
   7179 	 * to wait for them, we will be releasing the CORE_LOCK
   7180 	 * briefly, which will unblock interrupts on the current CPU.
   7181 	 */
   7182 
   7183 	/* Stop the one second clock. */
   7184 	if (wait)
   7185 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7186 	else
   7187 		callout_stop(&sc->sc_tick_ch);
   7188 
   7189 	/* Stop the 82547 Tx FIFO stall check timer. */
   7190 	if (sc->sc_type == WM_T_82547) {
   7191 		if (wait)
   7192 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7193 		else
   7194 			callout_stop(&sc->sc_txfifo_ch);
   7195 	}
   7196 
   7197 	/* Release any queued transmit buffers. */
   7198 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7199 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7200 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7201 		struct mbuf *m;
   7202 
   7203 		mutex_enter(txq->txq_lock);
   7204 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7205 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7206 			txs = &txq->txq_soft[i];
   7207 			if (txs->txs_mbuf != NULL) {
   7208 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7209 				m_freem(txs->txs_mbuf);
   7210 				txs->txs_mbuf = NULL;
   7211 			}
   7212 		}
   7213 		/* Drain txq_interq */
   7214 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7215 			m_freem(m);
   7216 		mutex_exit(txq->txq_lock);
   7217 	}
   7218 
   7219 	/* Mark the interface as down and cancel the watchdog timer. */
   7220 	ifp->if_flags &= ~IFF_RUNNING;
   7221 	sc->sc_if_flags = ifp->if_flags;
   7222 
   7223 	if (disable) {
   7224 		for (i = 0; i < sc->sc_nqueues; i++) {
   7225 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7226 			mutex_enter(rxq->rxq_lock);
   7227 			wm_rxdrain(rxq);
   7228 			mutex_exit(rxq->rxq_lock);
   7229 		}
   7230 	}
   7231 
   7232 #if 0 /* notyet */
   7233 	if (sc->sc_type >= WM_T_82544)
   7234 		CSR_WRITE(sc, WMREG_WUC, 0);
   7235 #endif
   7236 }
   7237 
   7238 static void
   7239 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7240 {
   7241 	struct mbuf *m;
   7242 	int i;
   7243 
   7244 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7245 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7246 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7247 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7248 		    m->m_data, m->m_len, m->m_flags);
   7249 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7250 	    i, i == 1 ? "" : "s");
   7251 }
   7252 
   7253 /*
   7254  * wm_82547_txfifo_stall:
   7255  *
   7256  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7257  *	reset the FIFO pointers, and restart packet transmission.
   7258  */
   7259 static void
   7260 wm_82547_txfifo_stall(void *arg)
   7261 {
   7262 	struct wm_softc *sc = arg;
   7263 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7264 
   7265 	mutex_enter(txq->txq_lock);
   7266 
   7267 	if (txq->txq_stopping)
   7268 		goto out;
   7269 
   7270 	if (txq->txq_fifo_stall) {
   7271 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7272 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7273 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7274 			/*
   7275 			 * Packets have drained.  Stop transmitter, reset
   7276 			 * FIFO pointers, restart transmitter, and kick
   7277 			 * the packet queue.
   7278 			 */
   7279 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7280 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7281 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7282 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7283 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7284 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7285 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7286 			CSR_WRITE_FLUSH(sc);
   7287 
   7288 			txq->txq_fifo_head = 0;
   7289 			txq->txq_fifo_stall = 0;
   7290 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7291 		} else {
   7292 			/*
   7293 			 * Still waiting for packets to drain; try again in
   7294 			 * another tick.
   7295 			 */
   7296 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7297 		}
   7298 	}
   7299 
   7300 out:
   7301 	mutex_exit(txq->txq_lock);
   7302 }
   7303 
   7304 /*
   7305  * wm_82547_txfifo_bugchk:
   7306  *
   7307  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7308  *	prevent enqueueing a packet that would wrap around the end
   7309  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   7310  *
   7311  *	We do this by checking the amount of space before the end
   7312  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7313  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7314  *	the internal FIFO pointers to the beginning, and restart
   7315  *	transmission on the interface.
   7316  */
   7317 #define	WM_FIFO_HDR		0x10
   7318 #define	WM_82547_PAD_LEN	0x3e0
   7319 static int
   7320 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7321 {
   7322 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7323 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7324 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7325 
   7326 	/* Just return if already stalled. */
   7327 	if (txq->txq_fifo_stall)
   7328 		return 1;
   7329 
   7330 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7331 		/* Stall only occurs in half-duplex mode. */
   7332 		goto send_packet;
   7333 	}
   7334 
   7335 	if (len >= WM_82547_PAD_LEN + space) {
   7336 		txq->txq_fifo_stall = 1;
   7337 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7338 		return 1;
   7339 	}
   7340 
   7341 send_packet:
   7342 	txq->txq_fifo_head += len;
   7343 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7344 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7345 
   7346 	return 0;
   7347 }
   7348 
   7349 static int
   7350 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7351 {
   7352 	int error;
   7353 
   7354 	/*
   7355 	 * Allocate the control data structures, and create and load the
   7356 	 * DMA map for it.
   7357 	 *
   7358 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7359 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7360 	 * both sets within the same 4G segment.
   7361 	 */
   7362 	if (sc->sc_type < WM_T_82544)
   7363 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7364 	else
   7365 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7366 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7367 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7368 	else
   7369 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7370 
   7371 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7372 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7373 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7374 		aprint_error_dev(sc->sc_dev,
   7375 		    "unable to allocate TX control data, error = %d\n",
   7376 		    error);
   7377 		goto fail_0;
   7378 	}
   7379 
   7380 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7381 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7382 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7383 		aprint_error_dev(sc->sc_dev,
   7384 		    "unable to map TX control data, error = %d\n", error);
   7385 		goto fail_1;
   7386 	}
   7387 
   7388 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7389 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7390 		aprint_error_dev(sc->sc_dev,
   7391 		    "unable to create TX control data DMA map, error = %d\n",
   7392 		    error);
   7393 		goto fail_2;
   7394 	}
   7395 
   7396 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7397 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7398 		aprint_error_dev(sc->sc_dev,
   7399 		    "unable to load TX control data DMA map, error = %d\n",
   7400 		    error);
   7401 		goto fail_3;
   7402 	}
   7403 
   7404 	return 0;
   7405 
   7406 fail_3:
   7407 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7408 fail_2:
   7409 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7410 	    WM_TXDESCS_SIZE(txq));
   7411 fail_1:
   7412 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7413 fail_0:
   7414 	return error;
   7415 }
   7416 
   7417 static void
   7418 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7419 {
   7420 
   7421 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7422 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7423 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7424 	    WM_TXDESCS_SIZE(txq));
   7425 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7426 }
   7427 
   7428 static int
   7429 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7430 {
   7431 	int error;
   7432 	size_t rxq_descs_size;
   7433 
   7434 	/*
   7435 	 * Allocate the control data structures, and create and load the
   7436 	 * DMA map for it.
   7437 	 *
   7438 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7439 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7440 	 * both sets within the same 4G segment.
   7441 	 */
   7442 	rxq->rxq_ndesc = WM_NRXDESC;
   7443 	if (sc->sc_type == WM_T_82574)
   7444 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7445 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7446 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7447 	else
   7448 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7449 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7450 
   7451 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7452 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7453 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7454 		aprint_error_dev(sc->sc_dev,
   7455 		    "unable to allocate RX control data, error = %d\n",
   7456 		    error);
   7457 		goto fail_0;
   7458 	}
   7459 
   7460 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7461 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7462 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7463 		aprint_error_dev(sc->sc_dev,
   7464 		    "unable to map RX control data, error = %d\n", error);
   7465 		goto fail_1;
   7466 	}
   7467 
   7468 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7469 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7470 		aprint_error_dev(sc->sc_dev,
   7471 		    "unable to create RX control data DMA map, error = %d\n",
   7472 		    error);
   7473 		goto fail_2;
   7474 	}
   7475 
   7476 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7477 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7478 		aprint_error_dev(sc->sc_dev,
   7479 		    "unable to load RX control data DMA map, error = %d\n",
   7480 		    error);
   7481 		goto fail_3;
   7482 	}
   7483 
   7484 	return 0;
   7485 
   7486  fail_3:
   7487 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7488  fail_2:
   7489 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7490 	    rxq_descs_size);
   7491  fail_1:
   7492 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7493  fail_0:
   7494 	return error;
   7495 }
   7496 
   7497 static void
   7498 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7499 {
   7500 
   7501 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7502 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7503 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7504 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7505 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7506 }
   7507 
   7508 
   7509 static int
   7510 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7511 {
   7512 	int i, error;
   7513 
   7514 	/* Create the transmit buffer DMA maps. */
   7515 	WM_TXQUEUELEN(txq) =
   7516 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7517 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7518 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7519 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7520 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7521 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7522 			aprint_error_dev(sc->sc_dev,
   7523 			    "unable to create Tx DMA map %d, error = %d\n",
   7524 			    i, error);
   7525 			goto fail;
   7526 		}
   7527 	}
   7528 
   7529 	return 0;
   7530 
   7531 fail:
   7532 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7533 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7534 			bus_dmamap_destroy(sc->sc_dmat,
   7535 			    txq->txq_soft[i].txs_dmamap);
   7536 	}
   7537 	return error;
   7538 }
   7539 
   7540 static void
   7541 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7542 {
   7543 	int i;
   7544 
   7545 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7546 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7547 			bus_dmamap_destroy(sc->sc_dmat,
   7548 			    txq->txq_soft[i].txs_dmamap);
   7549 	}
   7550 }
   7551 
   7552 static int
   7553 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7554 {
   7555 	int i, error;
   7556 
   7557 	/* Create the receive buffer DMA maps. */
   7558 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7559 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7560 			    MCLBYTES, 0, 0,
   7561 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7562 			aprint_error_dev(sc->sc_dev,
   7563 			    "unable to create Rx DMA map %d error = %d\n",
   7564 			    i, error);
   7565 			goto fail;
   7566 		}
   7567 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7568 	}
   7569 
   7570 	return 0;
   7571 
   7572  fail:
   7573 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7574 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7575 			bus_dmamap_destroy(sc->sc_dmat,
   7576 			    rxq->rxq_soft[i].rxs_dmamap);
   7577 	}
   7578 	return error;
   7579 }
   7580 
   7581 static void
   7582 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7583 {
   7584 	int i;
   7585 
   7586 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7587 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7588 			bus_dmamap_destroy(sc->sc_dmat,
   7589 			    rxq->rxq_soft[i].rxs_dmamap);
   7590 	}
   7591 }
   7592 
   7593 /*
   7594  * wm_alloc_quques:
   7595  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7596  */
   7597 static int
   7598 wm_alloc_txrx_queues(struct wm_softc *sc)
   7599 {
   7600 	int i, error, tx_done, rx_done;
   7601 
   7602 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7603 	    KM_SLEEP);
   7604 	if (sc->sc_queue == NULL) {
   7605 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   7606 		error = ENOMEM;
   7607 		goto fail_0;
   7608 	}
   7609 
   7610 	/* For transmission */
   7611 	error = 0;
   7612 	tx_done = 0;
   7613 	for (i = 0; i < sc->sc_nqueues; i++) {
   7614 #ifdef WM_EVENT_COUNTERS
   7615 		int j;
   7616 		const char *xname;
   7617 #endif
   7618 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7619 		txq->txq_sc = sc;
   7620 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7621 
   7622 		error = wm_alloc_tx_descs(sc, txq);
   7623 		if (error)
   7624 			break;
   7625 		error = wm_alloc_tx_buffer(sc, txq);
   7626 		if (error) {
   7627 			wm_free_tx_descs(sc, txq);
   7628 			break;
   7629 		}
   7630 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7631 		if (txq->txq_interq == NULL) {
   7632 			wm_free_tx_descs(sc, txq);
   7633 			wm_free_tx_buffer(sc, txq);
   7634 			error = ENOMEM;
   7635 			break;
   7636 		}
   7637 
   7638 #ifdef WM_EVENT_COUNTERS
   7639 		xname = device_xname(sc->sc_dev);
   7640 
   7641 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7642 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7643 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7644 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7645 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7646 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7647 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7648 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7649 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7650 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7651 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7652 
   7653 		for (j = 0; j < WM_NTXSEGS; j++) {
   7654 			snprintf(txq->txq_txseg_evcnt_names[j],
   7655 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7656 			    "txq%02dtxseg%d", i, j);
   7657 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7658 			    EVCNT_TYPE_MISC,
   7659 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7660 		}
   7661 
   7662 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7663 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7664 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7665 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7666 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7667 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7668 #endif /* WM_EVENT_COUNTERS */
   7669 
   7670 		tx_done++;
   7671 	}
   7672 	if (error)
   7673 		goto fail_1;
   7674 
   7675 	/* For receive */
   7676 	error = 0;
   7677 	rx_done = 0;
   7678 	for (i = 0; i < sc->sc_nqueues; i++) {
   7679 #ifdef WM_EVENT_COUNTERS
   7680 		const char *xname;
   7681 #endif
   7682 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7683 		rxq->rxq_sc = sc;
   7684 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7685 
   7686 		error = wm_alloc_rx_descs(sc, rxq);
   7687 		if (error)
   7688 			break;
   7689 
   7690 		error = wm_alloc_rx_buffer(sc, rxq);
   7691 		if (error) {
   7692 			wm_free_rx_descs(sc, rxq);
   7693 			break;
   7694 		}
   7695 
   7696 #ifdef WM_EVENT_COUNTERS
   7697 		xname = device_xname(sc->sc_dev);
   7698 
   7699 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7700 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7701 
   7702 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7703 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7704 #endif /* WM_EVENT_COUNTERS */
   7705 
   7706 		rx_done++;
   7707 	}
   7708 	if (error)
   7709 		goto fail_2;
   7710 
   7711 	return 0;
   7712 
   7713 fail_2:
   7714 	for (i = 0; i < rx_done; i++) {
   7715 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7716 		wm_free_rx_buffer(sc, rxq);
   7717 		wm_free_rx_descs(sc, rxq);
   7718 		if (rxq->rxq_lock)
   7719 			mutex_obj_free(rxq->rxq_lock);
   7720 	}
   7721 fail_1:
   7722 	for (i = 0; i < tx_done; i++) {
   7723 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7724 		pcq_destroy(txq->txq_interq);
   7725 		wm_free_tx_buffer(sc, txq);
   7726 		wm_free_tx_descs(sc, txq);
   7727 		if (txq->txq_lock)
   7728 			mutex_obj_free(txq->txq_lock);
   7729 	}
   7730 
   7731 	kmem_free(sc->sc_queue,
   7732 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7733 fail_0:
   7734 	return error;
   7735 }
   7736 
   7737 /*
   7738  * wm_free_quques:
   7739  *	Free {tx,rx}descs and {tx,rx} buffers
   7740  */
   7741 static void
   7742 wm_free_txrx_queues(struct wm_softc *sc)
   7743 {
   7744 	int i;
   7745 
   7746 	for (i = 0; i < sc->sc_nqueues; i++) {
   7747 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7748 
   7749 #ifdef WM_EVENT_COUNTERS
   7750 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7751 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7752 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7753 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7754 #endif /* WM_EVENT_COUNTERS */
   7755 
   7756 		wm_free_rx_buffer(sc, rxq);
   7757 		wm_free_rx_descs(sc, rxq);
   7758 		if (rxq->rxq_lock)
   7759 			mutex_obj_free(rxq->rxq_lock);
   7760 	}
   7761 
   7762 	for (i = 0; i < sc->sc_nqueues; i++) {
   7763 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7764 		struct mbuf *m;
   7765 #ifdef WM_EVENT_COUNTERS
   7766 		int j;
   7767 
   7768 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7769 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7770 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7771 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7772 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7773 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7774 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7775 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7776 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7777 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7778 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7779 
   7780 		for (j = 0; j < WM_NTXSEGS; j++)
   7781 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7782 
   7783 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7784 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7785 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7786 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7787 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7788 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7789 #endif /* WM_EVENT_COUNTERS */
   7790 
   7791 		/* Drain txq_interq */
   7792 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7793 			m_freem(m);
   7794 		pcq_destroy(txq->txq_interq);
   7795 
   7796 		wm_free_tx_buffer(sc, txq);
   7797 		wm_free_tx_descs(sc, txq);
   7798 		if (txq->txq_lock)
   7799 			mutex_obj_free(txq->txq_lock);
   7800 	}
   7801 
   7802 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7803 }
   7804 
   7805 static void
   7806 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7807 {
   7808 
   7809 	KASSERT(mutex_owned(txq->txq_lock));
   7810 
   7811 	/* Initialize the transmit descriptor ring. */
   7812 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7813 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7814 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7815 	txq->txq_free = WM_NTXDESC(txq);
   7816 	txq->txq_next = 0;
   7817 }
   7818 
   7819 static void
   7820 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7821     struct wm_txqueue *txq)
   7822 {
   7823 
   7824 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7825 		device_xname(sc->sc_dev), __func__));
   7826 	KASSERT(mutex_owned(txq->txq_lock));
   7827 
   7828 	if (sc->sc_type < WM_T_82543) {
   7829 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7830 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7831 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7832 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7833 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7834 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7835 	} else {
   7836 		int qid = wmq->wmq_id;
   7837 
   7838 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7839 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7840 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7841 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7842 
   7843 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7844 			/*
   7845 			 * Don't write TDT before TCTL.EN is set.
   7846 			 * See the document.
   7847 			 */
   7848 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7849 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7850 			    | TXDCTL_WTHRESH(0));
   7851 		else {
   7852 			/* XXX should update with AIM? */
   7853 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7854 			if (sc->sc_type >= WM_T_82540) {
   7855 				/* Should be the same */
   7856 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7857 			}
   7858 
   7859 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7860 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7861 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7862 		}
   7863 	}
   7864 }
   7865 
   7866 static void
   7867 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7868 {
   7869 	int i;
   7870 
   7871 	KASSERT(mutex_owned(txq->txq_lock));
   7872 
   7873 	/* Initialize the transmit job descriptors. */
   7874 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7875 		txq->txq_soft[i].txs_mbuf = NULL;
   7876 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7877 	txq->txq_snext = 0;
   7878 	txq->txq_sdirty = 0;
   7879 }
   7880 
   7881 static void
   7882 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7883     struct wm_txqueue *txq)
   7884 {
   7885 
   7886 	KASSERT(mutex_owned(txq->txq_lock));
   7887 
   7888 	/*
   7889 	 * Set up some register offsets that are different between
   7890 	 * the i82542 and the i82543 and later chips.
   7891 	 */
   7892 	if (sc->sc_type < WM_T_82543)
   7893 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7894 	else
   7895 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7896 
   7897 	wm_init_tx_descs(sc, txq);
   7898 	wm_init_tx_regs(sc, wmq, txq);
   7899 	wm_init_tx_buffer(sc, txq);
   7900 
   7901 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   7902 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7903 
   7904 	txq->txq_sending = false;
   7905 }
   7906 
   7907 static void
   7908 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7909     struct wm_rxqueue *rxq)
   7910 {
   7911 
   7912 	KASSERT(mutex_owned(rxq->rxq_lock));
   7913 
   7914 	/*
   7915 	 * Initialize the receive descriptor and receive job
   7916 	 * descriptor rings.
   7917 	 */
   7918 	if (sc->sc_type < WM_T_82543) {
   7919 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7920 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7921 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7922 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7923 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7924 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7925 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7926 
   7927 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7928 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7929 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7930 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7931 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7932 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7933 	} else {
   7934 		int qid = wmq->wmq_id;
   7935 
   7936 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7937 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7938 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7939 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7940 
   7941 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7942 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7943 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7944 				    "or higher\n", __func__, MCLBYTES);
   7945 
   7946 			/*
   7947 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7948 			 * only.
   7949 			 */
   7950 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7951 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7952 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7953 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7954 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7955 			    | RXDCTL_WTHRESH(1));
   7956 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7957 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7958 		} else {
   7959 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7960 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7961 			/* XXX should update with AIM? */
   7962 			CSR_WRITE(sc, WMREG_RDTR,
   7963 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7964 			/* MUST be same */
   7965 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7966 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7967 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7968 		}
   7969 	}
   7970 }
   7971 
   7972 static int
   7973 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7974 {
   7975 	struct wm_rxsoft *rxs;
   7976 	int error, i;
   7977 
   7978 	KASSERT(mutex_owned(rxq->rxq_lock));
   7979 
   7980 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7981 		rxs = &rxq->rxq_soft[i];
   7982 		if (rxs->rxs_mbuf == NULL) {
   7983 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7984 				log(LOG_ERR, "%s: unable to allocate or map "
   7985 				    "rx buffer %d, error = %d\n",
   7986 				    device_xname(sc->sc_dev), i, error);
   7987 				/*
   7988 				 * XXX Should attempt to run with fewer receive
   7989 				 * XXX buffers instead of just failing.
   7990 				 */
   7991 				wm_rxdrain(rxq);
   7992 				return ENOMEM;
   7993 			}
   7994 		} else {
   7995 			/*
   7996 			 * For 82575 and 82576, the RX descriptors must be
   7997 			 * initialized after the setting of RCTL.EN in
   7998 			 * wm_set_filter()
   7999 			 */
   8000 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8001 				wm_init_rxdesc(rxq, i);
   8002 		}
   8003 	}
   8004 	rxq->rxq_ptr = 0;
   8005 	rxq->rxq_discard = 0;
   8006 	WM_RXCHAIN_RESET(rxq);
   8007 
   8008 	return 0;
   8009 }
   8010 
   8011 static int
   8012 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8013     struct wm_rxqueue *rxq)
   8014 {
   8015 
   8016 	KASSERT(mutex_owned(rxq->rxq_lock));
   8017 
   8018 	/*
   8019 	 * Set up some register offsets that are different between
   8020 	 * the i82542 and the i82543 and later chips.
   8021 	 */
   8022 	if (sc->sc_type < WM_T_82543)
   8023 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8024 	else
   8025 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8026 
   8027 	wm_init_rx_regs(sc, wmq, rxq);
   8028 	return wm_init_rx_buffer(sc, rxq);
   8029 }
   8030 
   8031 /*
   8032  * wm_init_quques:
   8033  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8034  */
   8035 static int
   8036 wm_init_txrx_queues(struct wm_softc *sc)
   8037 {
   8038 	int i, error = 0;
   8039 
   8040 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8041 		device_xname(sc->sc_dev), __func__));
   8042 
   8043 	for (i = 0; i < sc->sc_nqueues; i++) {
   8044 		struct wm_queue *wmq = &sc->sc_queue[i];
   8045 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8046 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8047 
   8048 		/*
   8049 		 * TODO
   8050 		 * Currently, use constant variable instead of AIM.
   8051 		 * Furthermore, the interrupt interval of multiqueue which use
   8052 		 * polling mode is less than default value.
   8053 		 * More tuning and AIM are required.
   8054 		 */
   8055 		if (wm_is_using_multiqueue(sc))
   8056 			wmq->wmq_itr = 50;
   8057 		else
   8058 			wmq->wmq_itr = sc->sc_itr_init;
   8059 		wmq->wmq_set_itr = true;
   8060 
   8061 		mutex_enter(txq->txq_lock);
   8062 		wm_init_tx_queue(sc, wmq, txq);
   8063 		mutex_exit(txq->txq_lock);
   8064 
   8065 		mutex_enter(rxq->rxq_lock);
   8066 		error = wm_init_rx_queue(sc, wmq, rxq);
   8067 		mutex_exit(rxq->rxq_lock);
   8068 		if (error)
   8069 			break;
   8070 	}
   8071 
   8072 	return error;
   8073 }
   8074 
   8075 /*
   8076  * wm_tx_offload:
   8077  *
   8078  *	Set up TCP/IP checksumming parameters for the
   8079  *	specified packet.
   8080  */
   8081 static void
   8082 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8083     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8084 {
   8085 	struct mbuf *m0 = txs->txs_mbuf;
   8086 	struct livengood_tcpip_ctxdesc *t;
   8087 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8088 	uint32_t ipcse;
   8089 	struct ether_header *eh;
   8090 	int offset, iphl;
   8091 	uint8_t fields;
   8092 
   8093 	/*
   8094 	 * XXX It would be nice if the mbuf pkthdr had offset
   8095 	 * fields for the protocol headers.
   8096 	 */
   8097 
   8098 	eh = mtod(m0, struct ether_header *);
   8099 	switch (htons(eh->ether_type)) {
   8100 	case ETHERTYPE_IP:
   8101 	case ETHERTYPE_IPV6:
   8102 		offset = ETHER_HDR_LEN;
   8103 		break;
   8104 
   8105 	case ETHERTYPE_VLAN:
   8106 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8107 		break;
   8108 
   8109 	default:
   8110 		/* Don't support this protocol or encapsulation. */
   8111 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8112 		txq->txq_last_hw_ipcs = 0;
   8113 		txq->txq_last_hw_tucs = 0;
   8114 		*fieldsp = 0;
   8115 		*cmdp = 0;
   8116 		return;
   8117 	}
   8118 
   8119 	if ((m0->m_pkthdr.csum_flags &
   8120 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8121 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8122 	} else
   8123 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8124 
   8125 	ipcse = offset + iphl - 1;
   8126 
   8127 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8128 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8129 	seg = 0;
   8130 	fields = 0;
   8131 
   8132 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8133 		int hlen = offset + iphl;
   8134 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8135 
   8136 		if (__predict_false(m0->m_len <
   8137 				    (hlen + sizeof(struct tcphdr)))) {
   8138 			/*
   8139 			 * TCP/IP headers are not in the first mbuf; we need
   8140 			 * to do this the slow and painful way. Let's just
   8141 			 * hope this doesn't happen very often.
   8142 			 */
   8143 			struct tcphdr th;
   8144 
   8145 			WM_Q_EVCNT_INCR(txq, tsopain);
   8146 
   8147 			m_copydata(m0, hlen, sizeof(th), &th);
   8148 			if (v4) {
   8149 				struct ip ip;
   8150 
   8151 				m_copydata(m0, offset, sizeof(ip), &ip);
   8152 				ip.ip_len = 0;
   8153 				m_copyback(m0,
   8154 				    offset + offsetof(struct ip, ip_len),
   8155 				    sizeof(ip.ip_len), &ip.ip_len);
   8156 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8157 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8158 			} else {
   8159 				struct ip6_hdr ip6;
   8160 
   8161 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8162 				ip6.ip6_plen = 0;
   8163 				m_copyback(m0,
   8164 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8165 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8166 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8167 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8168 			}
   8169 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8170 			    sizeof(th.th_sum), &th.th_sum);
   8171 
   8172 			hlen += th.th_off << 2;
   8173 		} else {
   8174 			/*
   8175 			 * TCP/IP headers are in the first mbuf; we can do
   8176 			 * this the easy way.
   8177 			 */
   8178 			struct tcphdr *th;
   8179 
   8180 			if (v4) {
   8181 				struct ip *ip =
   8182 				    (void *)(mtod(m0, char *) + offset);
   8183 				th = (void *)(mtod(m0, char *) + hlen);
   8184 
   8185 				ip->ip_len = 0;
   8186 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8187 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8188 			} else {
   8189 				struct ip6_hdr *ip6 =
   8190 				    (void *)(mtod(m0, char *) + offset);
   8191 				th = (void *)(mtod(m0, char *) + hlen);
   8192 
   8193 				ip6->ip6_plen = 0;
   8194 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8195 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8196 			}
   8197 			hlen += th->th_off << 2;
   8198 		}
   8199 
   8200 		if (v4) {
   8201 			WM_Q_EVCNT_INCR(txq, tso);
   8202 			cmdlen |= WTX_TCPIP_CMD_IP;
   8203 		} else {
   8204 			WM_Q_EVCNT_INCR(txq, tso6);
   8205 			ipcse = 0;
   8206 		}
   8207 		cmd |= WTX_TCPIP_CMD_TSE;
   8208 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8209 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8210 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8211 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8212 	}
   8213 
   8214 	/*
   8215 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8216 	 * offload feature, if we load the context descriptor, we
   8217 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8218 	 */
   8219 
   8220 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8221 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8222 	    WTX_TCPIP_IPCSE(ipcse);
   8223 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8224 		WM_Q_EVCNT_INCR(txq, ipsum);
   8225 		fields |= WTX_IXSM;
   8226 	}
   8227 
   8228 	offset += iphl;
   8229 
   8230 	if (m0->m_pkthdr.csum_flags &
   8231 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8232 		WM_Q_EVCNT_INCR(txq, tusum);
   8233 		fields |= WTX_TXSM;
   8234 		tucs = WTX_TCPIP_TUCSS(offset) |
   8235 		    WTX_TCPIP_TUCSO(offset +
   8236 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8237 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8238 	} else if ((m0->m_pkthdr.csum_flags &
   8239 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8240 		WM_Q_EVCNT_INCR(txq, tusum6);
   8241 		fields |= WTX_TXSM;
   8242 		tucs = WTX_TCPIP_TUCSS(offset) |
   8243 		    WTX_TCPIP_TUCSO(offset +
   8244 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8245 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8246 	} else {
   8247 		/* Just initialize it to a valid TCP context. */
   8248 		tucs = WTX_TCPIP_TUCSS(offset) |
   8249 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8250 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8251 	}
   8252 
   8253 	*cmdp = cmd;
   8254 	*fieldsp = fields;
   8255 
   8256 	/*
   8257 	 * We don't have to write context descriptor for every packet
   8258 	 * except for 82574. For 82574, we must write context descriptor
   8259 	 * for every packet when we use two descriptor queues.
   8260 	 *
   8261 	 * The 82574L can only remember the *last* context used
   8262 	 * regardless of queue that it was use for.  We cannot reuse
   8263 	 * contexts on this hardware platform and must generate a new
   8264 	 * context every time.  82574L hardware spec, section 7.2.6,
   8265 	 * second note.
   8266 	 */
   8267 	if (sc->sc_nqueues < 2) {
   8268 		/*
   8269 		 * Setting up new checksum offload context for every
   8270 		 * frames takes a lot of processing time for hardware.
   8271 		 * This also reduces performance a lot for small sized
   8272 		 * frames so avoid it if driver can use previously
   8273 		 * configured checksum offload context.
   8274 		 * For TSO, in theory we can use the same TSO context only if
   8275 		 * frame is the same type(IP/TCP) and the same MSS. However
   8276 		 * checking whether a frame has the same IP/TCP structure is a
   8277 		 * hard thing so just ignore that and always restablish a
   8278 		 * new TSO context.
   8279 		 */
   8280 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8281 		    == 0) {
   8282 			if (txq->txq_last_hw_cmd == cmd &&
   8283 			    txq->txq_last_hw_fields == fields &&
   8284 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8285 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8286 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8287 				return;
   8288 			}
   8289 		}
   8290 
   8291 		txq->txq_last_hw_cmd = cmd;
   8292 		txq->txq_last_hw_fields = fields;
   8293 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8294 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8295 	}
   8296 
   8297 	/* Fill in the context descriptor. */
   8298 	t = (struct livengood_tcpip_ctxdesc *)
   8299 	    &txq->txq_descs[txq->txq_next];
   8300 	t->tcpip_ipcs = htole32(ipcs);
   8301 	t->tcpip_tucs = htole32(tucs);
   8302 	t->tcpip_cmdlen = htole32(cmdlen);
   8303 	t->tcpip_seg = htole32(seg);
   8304 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8305 
   8306 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8307 	txs->txs_ndesc++;
   8308 }
   8309 
   8310 static inline int
   8311 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8312 {
   8313 	struct wm_softc *sc = ifp->if_softc;
   8314 	u_int cpuid = cpu_index(curcpu());
   8315 
   8316 	/*
   8317 	 * Currently, simple distribute strategy.
   8318 	 * TODO:
   8319 	 * distribute by flowid(RSS has value).
   8320 	 */
   8321 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8322 }
   8323 
   8324 static inline bool
   8325 wm_linkdown_discard(struct wm_txqueue *txq)
   8326 {
   8327 
   8328 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8329 		return true;
   8330 
   8331 	return false;
   8332 }
   8333 
   8334 /*
   8335  * wm_start:		[ifnet interface function]
   8336  *
   8337  *	Start packet transmission on the interface.
   8338  */
   8339 static void
   8340 wm_start(struct ifnet *ifp)
   8341 {
   8342 	struct wm_softc *sc = ifp->if_softc;
   8343 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8344 
   8345 	KASSERT(if_is_mpsafe(ifp));
   8346 	/*
   8347 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8348 	 */
   8349 
   8350 	mutex_enter(txq->txq_lock);
   8351 	if (!txq->txq_stopping)
   8352 		wm_start_locked(ifp);
   8353 	mutex_exit(txq->txq_lock);
   8354 }
   8355 
   8356 static void
   8357 wm_start_locked(struct ifnet *ifp)
   8358 {
   8359 	struct wm_softc *sc = ifp->if_softc;
   8360 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8361 
   8362 	wm_send_common_locked(ifp, txq, false);
   8363 }
   8364 
   8365 static int
   8366 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8367 {
   8368 	int qid;
   8369 	struct wm_softc *sc = ifp->if_softc;
   8370 	struct wm_txqueue *txq;
   8371 
   8372 	qid = wm_select_txqueue(ifp, m);
   8373 	txq = &sc->sc_queue[qid].wmq_txq;
   8374 
   8375 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8376 		m_freem(m);
   8377 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8378 		return ENOBUFS;
   8379 	}
   8380 
   8381 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8382 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8383 	if (m->m_flags & M_MCAST)
   8384 		if_statinc_ref(nsr, if_omcasts);
   8385 	IF_STAT_PUTREF(ifp);
   8386 
   8387 	if (mutex_tryenter(txq->txq_lock)) {
   8388 		if (!txq->txq_stopping)
   8389 			wm_transmit_locked(ifp, txq);
   8390 		mutex_exit(txq->txq_lock);
   8391 	}
   8392 
   8393 	return 0;
   8394 }
   8395 
   8396 static void
   8397 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8398 {
   8399 
   8400 	wm_send_common_locked(ifp, txq, true);
   8401 }
   8402 
   8403 static void
   8404 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8405     bool is_transmit)
   8406 {
   8407 	struct wm_softc *sc = ifp->if_softc;
   8408 	struct mbuf *m0;
   8409 	struct wm_txsoft *txs;
   8410 	bus_dmamap_t dmamap;
   8411 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8412 	bus_addr_t curaddr;
   8413 	bus_size_t seglen, curlen;
   8414 	uint32_t cksumcmd;
   8415 	uint8_t cksumfields;
   8416 	bool remap = true;
   8417 
   8418 	KASSERT(mutex_owned(txq->txq_lock));
   8419 	KASSERT(!txq->txq_stopping);
   8420 
   8421 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8422 		return;
   8423 
   8424 	if (__predict_false(wm_linkdown_discard(txq))) {
   8425 		do {
   8426 			if (is_transmit)
   8427 				m0 = pcq_get(txq->txq_interq);
   8428 			else
   8429 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8430 			/*
   8431 			 * increment successed packet counter as in the case
   8432 			 * which the packet is discarded by link down PHY.
   8433 			 */
   8434 			if (m0 != NULL) {
   8435 				if_statinc(ifp, if_opackets);
   8436 				m_freem(m0);
   8437 			}
   8438 		} while (m0 != NULL);
   8439 		return;
   8440 	}
   8441 
   8442 	/* Remember the previous number of free descriptors. */
   8443 	ofree = txq->txq_free;
   8444 
   8445 	/*
   8446 	 * Loop through the send queue, setting up transmit descriptors
   8447 	 * until we drain the queue, or use up all available transmit
   8448 	 * descriptors.
   8449 	 */
   8450 	for (;;) {
   8451 		m0 = NULL;
   8452 
   8453 		/* Get a work queue entry. */
   8454 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8455 			wm_txeof(txq, UINT_MAX);
   8456 			if (txq->txq_sfree == 0) {
   8457 				DPRINTF(sc, WM_DEBUG_TX,
   8458 				    ("%s: TX: no free job descriptors\n",
   8459 					device_xname(sc->sc_dev)));
   8460 				WM_Q_EVCNT_INCR(txq, txsstall);
   8461 				break;
   8462 			}
   8463 		}
   8464 
   8465 		/* Grab a packet off the queue. */
   8466 		if (is_transmit)
   8467 			m0 = pcq_get(txq->txq_interq);
   8468 		else
   8469 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8470 		if (m0 == NULL)
   8471 			break;
   8472 
   8473 		DPRINTF(sc, WM_DEBUG_TX,
   8474 		    ("%s: TX: have packet to transmit: %p\n",
   8475 			device_xname(sc->sc_dev), m0));
   8476 
   8477 		txs = &txq->txq_soft[txq->txq_snext];
   8478 		dmamap = txs->txs_dmamap;
   8479 
   8480 		use_tso = (m0->m_pkthdr.csum_flags &
   8481 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8482 
   8483 		/*
   8484 		 * So says the Linux driver:
   8485 		 * The controller does a simple calculation to make sure
   8486 		 * there is enough room in the FIFO before initiating the
   8487 		 * DMA for each buffer. The calc is:
   8488 		 *	4 = ceil(buffer len / MSS)
   8489 		 * To make sure we don't overrun the FIFO, adjust the max
   8490 		 * buffer len if the MSS drops.
   8491 		 */
   8492 		dmamap->dm_maxsegsz =
   8493 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8494 		    ? m0->m_pkthdr.segsz << 2
   8495 		    : WTX_MAX_LEN;
   8496 
   8497 		/*
   8498 		 * Load the DMA map.  If this fails, the packet either
   8499 		 * didn't fit in the allotted number of segments, or we
   8500 		 * were short on resources.  For the too-many-segments
   8501 		 * case, we simply report an error and drop the packet,
   8502 		 * since we can't sanely copy a jumbo packet to a single
   8503 		 * buffer.
   8504 		 */
   8505 retry:
   8506 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8507 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8508 		if (__predict_false(error)) {
   8509 			if (error == EFBIG) {
   8510 				if (remap == true) {
   8511 					struct mbuf *m;
   8512 
   8513 					remap = false;
   8514 					m = m_defrag(m0, M_NOWAIT);
   8515 					if (m != NULL) {
   8516 						WM_Q_EVCNT_INCR(txq, defrag);
   8517 						m0 = m;
   8518 						goto retry;
   8519 					}
   8520 				}
   8521 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8522 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8523 				    "DMA segments, dropping...\n",
   8524 				    device_xname(sc->sc_dev));
   8525 				wm_dump_mbuf_chain(sc, m0);
   8526 				m_freem(m0);
   8527 				continue;
   8528 			}
   8529 			/* Short on resources, just stop for now. */
   8530 			DPRINTF(sc, WM_DEBUG_TX,
   8531 			    ("%s: TX: dmamap load failed: %d\n",
   8532 				device_xname(sc->sc_dev), error));
   8533 			break;
   8534 		}
   8535 
   8536 		segs_needed = dmamap->dm_nsegs;
   8537 		if (use_tso) {
   8538 			/* For sentinel descriptor; see below. */
   8539 			segs_needed++;
   8540 		}
   8541 
   8542 		/*
   8543 		 * Ensure we have enough descriptors free to describe
   8544 		 * the packet. Note, we always reserve one descriptor
   8545 		 * at the end of the ring due to the semantics of the
   8546 		 * TDT register, plus one more in the event we need
   8547 		 * to load offload context.
   8548 		 */
   8549 		if (segs_needed > txq->txq_free - 2) {
   8550 			/*
   8551 			 * Not enough free descriptors to transmit this
   8552 			 * packet.  We haven't committed anything yet,
   8553 			 * so just unload the DMA map, put the packet
   8554 			 * pack on the queue, and punt. Notify the upper
   8555 			 * layer that there are no more slots left.
   8556 			 */
   8557 			DPRINTF(sc, WM_DEBUG_TX,
   8558 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8559 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8560 				segs_needed, txq->txq_free - 1));
   8561 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8562 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8563 			WM_Q_EVCNT_INCR(txq, txdstall);
   8564 			break;
   8565 		}
   8566 
   8567 		/*
   8568 		 * Check for 82547 Tx FIFO bug. We need to do this
   8569 		 * once we know we can transmit the packet, since we
   8570 		 * do some internal FIFO space accounting here.
   8571 		 */
   8572 		if (sc->sc_type == WM_T_82547 &&
   8573 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8574 			DPRINTF(sc, WM_DEBUG_TX,
   8575 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8576 				device_xname(sc->sc_dev)));
   8577 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8578 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8579 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8580 			break;
   8581 		}
   8582 
   8583 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8584 
   8585 		DPRINTF(sc, WM_DEBUG_TX,
   8586 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8587 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8588 
   8589 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8590 
   8591 		/*
   8592 		 * Store a pointer to the packet so that we can free it
   8593 		 * later.
   8594 		 *
   8595 		 * Initially, we consider the number of descriptors the
   8596 		 * packet uses the number of DMA segments.  This may be
   8597 		 * incremented by 1 if we do checksum offload (a descriptor
   8598 		 * is used to set the checksum context).
   8599 		 */
   8600 		txs->txs_mbuf = m0;
   8601 		txs->txs_firstdesc = txq->txq_next;
   8602 		txs->txs_ndesc = segs_needed;
   8603 
   8604 		/* Set up offload parameters for this packet. */
   8605 		if (m0->m_pkthdr.csum_flags &
   8606 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8607 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8608 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8609 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8610 		} else {
   8611 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8612 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8613 			cksumcmd = 0;
   8614 			cksumfields = 0;
   8615 		}
   8616 
   8617 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8618 
   8619 		/* Sync the DMA map. */
   8620 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8621 		    BUS_DMASYNC_PREWRITE);
   8622 
   8623 		/* Initialize the transmit descriptor. */
   8624 		for (nexttx = txq->txq_next, seg = 0;
   8625 		     seg < dmamap->dm_nsegs; seg++) {
   8626 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8627 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8628 			     seglen != 0;
   8629 			     curaddr += curlen, seglen -= curlen,
   8630 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8631 				curlen = seglen;
   8632 
   8633 				/*
   8634 				 * So says the Linux driver:
   8635 				 * Work around for premature descriptor
   8636 				 * write-backs in TSO mode.  Append a
   8637 				 * 4-byte sentinel descriptor.
   8638 				 */
   8639 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8640 				    curlen > 8)
   8641 					curlen -= 4;
   8642 
   8643 				wm_set_dma_addr(
   8644 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8645 				txq->txq_descs[nexttx].wtx_cmdlen
   8646 				    = htole32(cksumcmd | curlen);
   8647 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8648 				    = 0;
   8649 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8650 				    = cksumfields;
   8651 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8652 				lasttx = nexttx;
   8653 
   8654 				DPRINTF(sc, WM_DEBUG_TX,
   8655 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8656 					"len %#04zx\n",
   8657 					device_xname(sc->sc_dev), nexttx,
   8658 					(uint64_t)curaddr, curlen));
   8659 			}
   8660 		}
   8661 
   8662 		KASSERT(lasttx != -1);
   8663 
   8664 		/*
   8665 		 * Set up the command byte on the last descriptor of
   8666 		 * the packet. If we're in the interrupt delay window,
   8667 		 * delay the interrupt.
   8668 		 */
   8669 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8670 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8671 
   8672 		/*
   8673 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8674 		 * up the descriptor to encapsulate the packet for us.
   8675 		 *
   8676 		 * This is only valid on the last descriptor of the packet.
   8677 		 */
   8678 		if (vlan_has_tag(m0)) {
   8679 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8680 			    htole32(WTX_CMD_VLE);
   8681 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8682 			    = htole16(vlan_get_tag(m0));
   8683 		}
   8684 
   8685 		txs->txs_lastdesc = lasttx;
   8686 
   8687 		DPRINTF(sc, WM_DEBUG_TX,
   8688 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8689 			device_xname(sc->sc_dev),
   8690 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8691 
   8692 		/* Sync the descriptors we're using. */
   8693 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8694 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8695 
   8696 		/* Give the packet to the chip. */
   8697 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8698 
   8699 		DPRINTF(sc, WM_DEBUG_TX,
   8700 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8701 
   8702 		DPRINTF(sc, WM_DEBUG_TX,
   8703 		    ("%s: TX: finished transmitting packet, job %d\n",
   8704 			device_xname(sc->sc_dev), txq->txq_snext));
   8705 
   8706 		/* Advance the tx pointer. */
   8707 		txq->txq_free -= txs->txs_ndesc;
   8708 		txq->txq_next = nexttx;
   8709 
   8710 		txq->txq_sfree--;
   8711 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8712 
   8713 		/* Pass the packet to any BPF listeners. */
   8714 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8715 	}
   8716 
   8717 	if (m0 != NULL) {
   8718 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8719 		WM_Q_EVCNT_INCR(txq, descdrop);
   8720 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8721 			__func__));
   8722 		m_freem(m0);
   8723 	}
   8724 
   8725 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8726 		/* No more slots; notify upper layer. */
   8727 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8728 	}
   8729 
   8730 	if (txq->txq_free != ofree) {
   8731 		/* Set a watchdog timer in case the chip flakes out. */
   8732 		txq->txq_lastsent = time_uptime;
   8733 		txq->txq_sending = true;
   8734 	}
   8735 }
   8736 
   8737 /*
   8738  * wm_nq_tx_offload:
   8739  *
   8740  *	Set up TCP/IP checksumming parameters for the
   8741  *	specified packet, for NEWQUEUE devices
   8742  */
   8743 static void
   8744 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8745     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8746 {
   8747 	struct mbuf *m0 = txs->txs_mbuf;
   8748 	uint32_t vl_len, mssidx, cmdc;
   8749 	struct ether_header *eh;
   8750 	int offset, iphl;
   8751 
   8752 	/*
   8753 	 * XXX It would be nice if the mbuf pkthdr had offset
   8754 	 * fields for the protocol headers.
   8755 	 */
   8756 	*cmdlenp = 0;
   8757 	*fieldsp = 0;
   8758 
   8759 	eh = mtod(m0, struct ether_header *);
   8760 	switch (htons(eh->ether_type)) {
   8761 	case ETHERTYPE_IP:
   8762 	case ETHERTYPE_IPV6:
   8763 		offset = ETHER_HDR_LEN;
   8764 		break;
   8765 
   8766 	case ETHERTYPE_VLAN:
   8767 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8768 		break;
   8769 
   8770 	default:
   8771 		/* Don't support this protocol or encapsulation. */
   8772 		*do_csum = false;
   8773 		return;
   8774 	}
   8775 	*do_csum = true;
   8776 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8777 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8778 
   8779 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8780 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8781 
   8782 	if ((m0->m_pkthdr.csum_flags &
   8783 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8784 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8785 	} else {
   8786 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8787 	}
   8788 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8789 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8790 
   8791 	if (vlan_has_tag(m0)) {
   8792 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8793 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8794 		*cmdlenp |= NQTX_CMD_VLE;
   8795 	}
   8796 
   8797 	mssidx = 0;
   8798 
   8799 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8800 		int hlen = offset + iphl;
   8801 		int tcp_hlen;
   8802 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8803 
   8804 		if (__predict_false(m0->m_len <
   8805 				    (hlen + sizeof(struct tcphdr)))) {
   8806 			/*
   8807 			 * TCP/IP headers are not in the first mbuf; we need
   8808 			 * to do this the slow and painful way. Let's just
   8809 			 * hope this doesn't happen very often.
   8810 			 */
   8811 			struct tcphdr th;
   8812 
   8813 			WM_Q_EVCNT_INCR(txq, tsopain);
   8814 
   8815 			m_copydata(m0, hlen, sizeof(th), &th);
   8816 			if (v4) {
   8817 				struct ip ip;
   8818 
   8819 				m_copydata(m0, offset, sizeof(ip), &ip);
   8820 				ip.ip_len = 0;
   8821 				m_copyback(m0,
   8822 				    offset + offsetof(struct ip, ip_len),
   8823 				    sizeof(ip.ip_len), &ip.ip_len);
   8824 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8825 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8826 			} else {
   8827 				struct ip6_hdr ip6;
   8828 
   8829 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8830 				ip6.ip6_plen = 0;
   8831 				m_copyback(m0,
   8832 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8833 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8834 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8835 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8836 			}
   8837 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8838 			    sizeof(th.th_sum), &th.th_sum);
   8839 
   8840 			tcp_hlen = th.th_off << 2;
   8841 		} else {
   8842 			/*
   8843 			 * TCP/IP headers are in the first mbuf; we can do
   8844 			 * this the easy way.
   8845 			 */
   8846 			struct tcphdr *th;
   8847 
   8848 			if (v4) {
   8849 				struct ip *ip =
   8850 				    (void *)(mtod(m0, char *) + offset);
   8851 				th = (void *)(mtod(m0, char *) + hlen);
   8852 
   8853 				ip->ip_len = 0;
   8854 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8855 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8856 			} else {
   8857 				struct ip6_hdr *ip6 =
   8858 				    (void *)(mtod(m0, char *) + offset);
   8859 				th = (void *)(mtod(m0, char *) + hlen);
   8860 
   8861 				ip6->ip6_plen = 0;
   8862 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8863 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8864 			}
   8865 			tcp_hlen = th->th_off << 2;
   8866 		}
   8867 		hlen += tcp_hlen;
   8868 		*cmdlenp |= NQTX_CMD_TSE;
   8869 
   8870 		if (v4) {
   8871 			WM_Q_EVCNT_INCR(txq, tso);
   8872 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8873 		} else {
   8874 			WM_Q_EVCNT_INCR(txq, tso6);
   8875 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8876 		}
   8877 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8878 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8879 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8880 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8881 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8882 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8883 	} else {
   8884 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8885 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8886 	}
   8887 
   8888 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8889 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8890 		cmdc |= NQTXC_CMD_IP4;
   8891 	}
   8892 
   8893 	if (m0->m_pkthdr.csum_flags &
   8894 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8895 		WM_Q_EVCNT_INCR(txq, tusum);
   8896 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8897 			cmdc |= NQTXC_CMD_TCP;
   8898 		else
   8899 			cmdc |= NQTXC_CMD_UDP;
   8900 
   8901 		cmdc |= NQTXC_CMD_IP4;
   8902 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8903 	}
   8904 	if (m0->m_pkthdr.csum_flags &
   8905 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8906 		WM_Q_EVCNT_INCR(txq, tusum6);
   8907 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8908 			cmdc |= NQTXC_CMD_TCP;
   8909 		else
   8910 			cmdc |= NQTXC_CMD_UDP;
   8911 
   8912 		cmdc |= NQTXC_CMD_IP6;
   8913 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8914 	}
   8915 
   8916 	/*
   8917 	 * We don't have to write context descriptor for every packet to
   8918 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   8919 	 * I210 and I211. It is enough to write once per a Tx queue for these
   8920 	 * controllers.
   8921 	 * It would be overhead to write context descriptor for every packet,
   8922 	 * however it does not cause problems.
   8923 	 */
   8924 	/* Fill in the context descriptor. */
   8925 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   8926 	    htole32(vl_len);
   8927 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   8928 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   8929 	    htole32(cmdc);
   8930 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8931 	    htole32(mssidx);
   8932 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8933 	DPRINTF(sc, WM_DEBUG_TX,
   8934 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8935 		txq->txq_next, 0, vl_len));
   8936 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8937 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8938 	txs->txs_ndesc++;
   8939 }
   8940 
   8941 /*
   8942  * wm_nq_start:		[ifnet interface function]
   8943  *
   8944  *	Start packet transmission on the interface for NEWQUEUE devices
   8945  */
   8946 static void
   8947 wm_nq_start(struct ifnet *ifp)
   8948 {
   8949 	struct wm_softc *sc = ifp->if_softc;
   8950 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8951 
   8952 	KASSERT(if_is_mpsafe(ifp));
   8953 	/*
   8954 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8955 	 */
   8956 
   8957 	mutex_enter(txq->txq_lock);
   8958 	if (!txq->txq_stopping)
   8959 		wm_nq_start_locked(ifp);
   8960 	mutex_exit(txq->txq_lock);
   8961 }
   8962 
   8963 static void
   8964 wm_nq_start_locked(struct ifnet *ifp)
   8965 {
   8966 	struct wm_softc *sc = ifp->if_softc;
   8967 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8968 
   8969 	wm_nq_send_common_locked(ifp, txq, false);
   8970 }
   8971 
   8972 static int
   8973 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8974 {
   8975 	int qid;
   8976 	struct wm_softc *sc = ifp->if_softc;
   8977 	struct wm_txqueue *txq;
   8978 
   8979 	qid = wm_select_txqueue(ifp, m);
   8980 	txq = &sc->sc_queue[qid].wmq_txq;
   8981 
   8982 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8983 		m_freem(m);
   8984 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8985 		return ENOBUFS;
   8986 	}
   8987 
   8988 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8989 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8990 	if (m->m_flags & M_MCAST)
   8991 		if_statinc_ref(nsr, if_omcasts);
   8992 	IF_STAT_PUTREF(ifp);
   8993 
   8994 	/*
   8995 	 * The situations which this mutex_tryenter() fails at running time
   8996 	 * are below two patterns.
   8997 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8998 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8999 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   9000 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   9001 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   9002 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   9003 	 * stuck, either.
   9004 	 */
   9005 	if (mutex_tryenter(txq->txq_lock)) {
   9006 		if (!txq->txq_stopping)
   9007 			wm_nq_transmit_locked(ifp, txq);
   9008 		mutex_exit(txq->txq_lock);
   9009 	}
   9010 
   9011 	return 0;
   9012 }
   9013 
   9014 static void
   9015 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9016 {
   9017 
   9018 	wm_nq_send_common_locked(ifp, txq, true);
   9019 }
   9020 
   9021 static void
   9022 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9023     bool is_transmit)
   9024 {
   9025 	struct wm_softc *sc = ifp->if_softc;
   9026 	struct mbuf *m0;
   9027 	struct wm_txsoft *txs;
   9028 	bus_dmamap_t dmamap;
   9029 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9030 	bool do_csum, sent;
   9031 	bool remap = true;
   9032 
   9033 	KASSERT(mutex_owned(txq->txq_lock));
   9034 	KASSERT(!txq->txq_stopping);
   9035 
   9036 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9037 		return;
   9038 
   9039 	if (__predict_false(wm_linkdown_discard(txq))) {
   9040 		do {
   9041 			if (is_transmit)
   9042 				m0 = pcq_get(txq->txq_interq);
   9043 			else
   9044 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9045 			/*
   9046 			 * increment successed packet counter as in the case
   9047 			 * which the packet is discarded by link down PHY.
   9048 			 */
   9049 			if (m0 != NULL) {
   9050 				if_statinc(ifp, if_opackets);
   9051 				m_freem(m0);
   9052 			}
   9053 		} while (m0 != NULL);
   9054 		return;
   9055 	}
   9056 
   9057 	sent = false;
   9058 
   9059 	/*
   9060 	 * Loop through the send queue, setting up transmit descriptors
   9061 	 * until we drain the queue, or use up all available transmit
   9062 	 * descriptors.
   9063 	 */
   9064 	for (;;) {
   9065 		m0 = NULL;
   9066 
   9067 		/* Get a work queue entry. */
   9068 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9069 			wm_txeof(txq, UINT_MAX);
   9070 			if (txq->txq_sfree == 0) {
   9071 				DPRINTF(sc, WM_DEBUG_TX,
   9072 				    ("%s: TX: no free job descriptors\n",
   9073 					device_xname(sc->sc_dev)));
   9074 				WM_Q_EVCNT_INCR(txq, txsstall);
   9075 				break;
   9076 			}
   9077 		}
   9078 
   9079 		/* Grab a packet off the queue. */
   9080 		if (is_transmit)
   9081 			m0 = pcq_get(txq->txq_interq);
   9082 		else
   9083 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9084 		if (m0 == NULL)
   9085 			break;
   9086 
   9087 		DPRINTF(sc, WM_DEBUG_TX,
   9088 		    ("%s: TX: have packet to transmit: %p\n",
   9089 			device_xname(sc->sc_dev), m0));
   9090 
   9091 		txs = &txq->txq_soft[txq->txq_snext];
   9092 		dmamap = txs->txs_dmamap;
   9093 
   9094 		/*
   9095 		 * Load the DMA map.  If this fails, the packet either
   9096 		 * didn't fit in the allotted number of segments, or we
   9097 		 * were short on resources.  For the too-many-segments
   9098 		 * case, we simply report an error and drop the packet,
   9099 		 * since we can't sanely copy a jumbo packet to a single
   9100 		 * buffer.
   9101 		 */
   9102 retry:
   9103 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9104 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9105 		if (__predict_false(error)) {
   9106 			if (error == EFBIG) {
   9107 				if (remap == true) {
   9108 					struct mbuf *m;
   9109 
   9110 					remap = false;
   9111 					m = m_defrag(m0, M_NOWAIT);
   9112 					if (m != NULL) {
   9113 						WM_Q_EVCNT_INCR(txq, defrag);
   9114 						m0 = m;
   9115 						goto retry;
   9116 					}
   9117 				}
   9118 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9119 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9120 				    "DMA segments, dropping...\n",
   9121 				    device_xname(sc->sc_dev));
   9122 				wm_dump_mbuf_chain(sc, m0);
   9123 				m_freem(m0);
   9124 				continue;
   9125 			}
   9126 			/* Short on resources, just stop for now. */
   9127 			DPRINTF(sc, WM_DEBUG_TX,
   9128 			    ("%s: TX: dmamap load failed: %d\n",
   9129 				device_xname(sc->sc_dev), error));
   9130 			break;
   9131 		}
   9132 
   9133 		segs_needed = dmamap->dm_nsegs;
   9134 
   9135 		/*
   9136 		 * Ensure we have enough descriptors free to describe
   9137 		 * the packet. Note, we always reserve one descriptor
   9138 		 * at the end of the ring due to the semantics of the
   9139 		 * TDT register, plus one more in the event we need
   9140 		 * to load offload context.
   9141 		 */
   9142 		if (segs_needed > txq->txq_free - 2) {
   9143 			/*
   9144 			 * Not enough free descriptors to transmit this
   9145 			 * packet.  We haven't committed anything yet,
   9146 			 * so just unload the DMA map, put the packet
   9147 			 * pack on the queue, and punt. Notify the upper
   9148 			 * layer that there are no more slots left.
   9149 			 */
   9150 			DPRINTF(sc, WM_DEBUG_TX,
   9151 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9152 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9153 				segs_needed, txq->txq_free - 1));
   9154 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9155 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9156 			WM_Q_EVCNT_INCR(txq, txdstall);
   9157 			break;
   9158 		}
   9159 
   9160 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9161 
   9162 		DPRINTF(sc, WM_DEBUG_TX,
   9163 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9164 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9165 
   9166 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9167 
   9168 		/*
   9169 		 * Store a pointer to the packet so that we can free it
   9170 		 * later.
   9171 		 *
   9172 		 * Initially, we consider the number of descriptors the
   9173 		 * packet uses the number of DMA segments.  This may be
   9174 		 * incremented by 1 if we do checksum offload (a descriptor
   9175 		 * is used to set the checksum context).
   9176 		 */
   9177 		txs->txs_mbuf = m0;
   9178 		txs->txs_firstdesc = txq->txq_next;
   9179 		txs->txs_ndesc = segs_needed;
   9180 
   9181 		/* Set up offload parameters for this packet. */
   9182 		uint32_t cmdlen, fields, dcmdlen;
   9183 		if (m0->m_pkthdr.csum_flags &
   9184 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9185 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9186 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9187 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9188 			    &do_csum);
   9189 		} else {
   9190 			do_csum = false;
   9191 			cmdlen = 0;
   9192 			fields = 0;
   9193 		}
   9194 
   9195 		/* Sync the DMA map. */
   9196 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9197 		    BUS_DMASYNC_PREWRITE);
   9198 
   9199 		/* Initialize the first transmit descriptor. */
   9200 		nexttx = txq->txq_next;
   9201 		if (!do_csum) {
   9202 			/* Set up a legacy descriptor */
   9203 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9204 			    dmamap->dm_segs[0].ds_addr);
   9205 			txq->txq_descs[nexttx].wtx_cmdlen =
   9206 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9207 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9208 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9209 			if (vlan_has_tag(m0)) {
   9210 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9211 				    htole32(WTX_CMD_VLE);
   9212 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9213 				    htole16(vlan_get_tag(m0));
   9214 			} else
   9215 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   9216 
   9217 			dcmdlen = 0;
   9218 		} else {
   9219 			/* Set up an advanced data descriptor */
   9220 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9221 			    htole64(dmamap->dm_segs[0].ds_addr);
   9222 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9223 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9224 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9225 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9226 			    htole32(fields);
   9227 			DPRINTF(sc, WM_DEBUG_TX,
   9228 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9229 				device_xname(sc->sc_dev), nexttx,
   9230 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9231 			DPRINTF(sc, WM_DEBUG_TX,
   9232 			    ("\t 0x%08x%08x\n", fields,
   9233 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9234 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9235 		}
   9236 
   9237 		lasttx = nexttx;
   9238 		nexttx = WM_NEXTTX(txq, nexttx);
   9239 		/*
   9240 		 * Fill in the next descriptors. Legacy or advanced format
   9241 		 * is the same here.
   9242 		 */
   9243 		for (seg = 1; seg < dmamap->dm_nsegs;
   9244 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9245 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9246 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9247 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9248 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9249 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9250 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9251 			lasttx = nexttx;
   9252 
   9253 			DPRINTF(sc, WM_DEBUG_TX,
   9254 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9255 				device_xname(sc->sc_dev), nexttx,
   9256 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9257 				dmamap->dm_segs[seg].ds_len));
   9258 		}
   9259 
   9260 		KASSERT(lasttx != -1);
   9261 
   9262 		/*
   9263 		 * Set up the command byte on the last descriptor of
   9264 		 * the packet. If we're in the interrupt delay window,
   9265 		 * delay the interrupt.
   9266 		 */
   9267 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9268 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9269 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9270 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9271 
   9272 		txs->txs_lastdesc = lasttx;
   9273 
   9274 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9275 		    device_xname(sc->sc_dev),
   9276 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9277 
   9278 		/* Sync the descriptors we're using. */
   9279 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9280 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9281 
   9282 		/* Give the packet to the chip. */
   9283 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9284 		sent = true;
   9285 
   9286 		DPRINTF(sc, WM_DEBUG_TX,
   9287 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9288 
   9289 		DPRINTF(sc, WM_DEBUG_TX,
   9290 		    ("%s: TX: finished transmitting packet, job %d\n",
   9291 			device_xname(sc->sc_dev), txq->txq_snext));
   9292 
   9293 		/* Advance the tx pointer. */
   9294 		txq->txq_free -= txs->txs_ndesc;
   9295 		txq->txq_next = nexttx;
   9296 
   9297 		txq->txq_sfree--;
   9298 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9299 
   9300 		/* Pass the packet to any BPF listeners. */
   9301 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9302 	}
   9303 
   9304 	if (m0 != NULL) {
   9305 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9306 		WM_Q_EVCNT_INCR(txq, descdrop);
   9307 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9308 			__func__));
   9309 		m_freem(m0);
   9310 	}
   9311 
   9312 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9313 		/* No more slots; notify upper layer. */
   9314 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9315 	}
   9316 
   9317 	if (sent) {
   9318 		/* Set a watchdog timer in case the chip flakes out. */
   9319 		txq->txq_lastsent = time_uptime;
   9320 		txq->txq_sending = true;
   9321 	}
   9322 }
   9323 
   9324 static void
   9325 wm_deferred_start_locked(struct wm_txqueue *txq)
   9326 {
   9327 	struct wm_softc *sc = txq->txq_sc;
   9328 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9329 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9330 	int qid = wmq->wmq_id;
   9331 
   9332 	KASSERT(mutex_owned(txq->txq_lock));
   9333 	KASSERT(!txq->txq_stopping);
   9334 
   9335 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   9336 		/* XXX need for ALTQ or one CPU system */
   9337 		if (qid == 0)
   9338 			wm_nq_start_locked(ifp);
   9339 		wm_nq_transmit_locked(ifp, txq);
   9340 	} else {
   9341 		/* XXX need for ALTQ or one CPU system */
   9342 		if (qid == 0)
   9343 			wm_start_locked(ifp);
   9344 		wm_transmit_locked(ifp, txq);
   9345 	}
   9346 }
   9347 
   9348 /* Interrupt */
   9349 
   9350 /*
   9351  * wm_txeof:
   9352  *
   9353  *	Helper; handle transmit interrupts.
   9354  */
   9355 static bool
   9356 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9357 {
   9358 	struct wm_softc *sc = txq->txq_sc;
   9359 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9360 	struct wm_txsoft *txs;
   9361 	int count = 0;
   9362 	int i;
   9363 	uint8_t status;
   9364 	bool more = false;
   9365 
   9366 	KASSERT(mutex_owned(txq->txq_lock));
   9367 
   9368 	if (txq->txq_stopping)
   9369 		return false;
   9370 
   9371 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9372 
   9373 	/*
   9374 	 * Go through the Tx list and free mbufs for those
   9375 	 * frames which have been transmitted.
   9376 	 */
   9377 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9378 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9379 		txs = &txq->txq_soft[i];
   9380 
   9381 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9382 			device_xname(sc->sc_dev), i));
   9383 
   9384 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9385 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9386 
   9387 		status =
   9388 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9389 		if ((status & WTX_ST_DD) == 0) {
   9390 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9391 			    BUS_DMASYNC_PREREAD);
   9392 			break;
   9393 		}
   9394 
   9395 		if (limit-- == 0) {
   9396 			more = true;
   9397 			DPRINTF(sc, WM_DEBUG_TX,
   9398 			    ("%s: TX: loop limited, job %d is not processed\n",
   9399 				device_xname(sc->sc_dev), i));
   9400 			break;
   9401 		}
   9402 
   9403 		count++;
   9404 		DPRINTF(sc, WM_DEBUG_TX,
   9405 		    ("%s: TX: job %d done: descs %d..%d\n",
   9406 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9407 		    txs->txs_lastdesc));
   9408 
   9409 		/*
   9410 		 * XXX We should probably be using the statistics
   9411 		 * XXX registers, but I don't know if they exist
   9412 		 * XXX on chips before the i82544.
   9413 		 */
   9414 
   9415 #ifdef WM_EVENT_COUNTERS
   9416 		if (status & WTX_ST_TU)
   9417 			WM_Q_EVCNT_INCR(txq, underrun);
   9418 #endif /* WM_EVENT_COUNTERS */
   9419 
   9420 		/*
   9421 		 * 82574 and newer's document says the status field has neither
   9422 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
   9423 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
   9424 		 * Developer's Manual", 82574 datasheet and newer.
   9425 		 *
   9426 		 * XXX I saw the LC bit was set on I218 even though the media
   9427 		 * was full duplex, so the bit might be used for other
   9428 		 * meaning ...(I have no document).
   9429 		 */
   9430 
   9431 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9432 		    && ((sc->sc_type < WM_T_82574)
   9433 			|| (sc->sc_type == WM_T_80003))) {
   9434 			if_statinc(ifp, if_oerrors);
   9435 			if (status & WTX_ST_LC)
   9436 				log(LOG_WARNING, "%s: late collision\n",
   9437 				    device_xname(sc->sc_dev));
   9438 			else if (status & WTX_ST_EC) {
   9439 				if_statadd(ifp, if_collisions,
   9440 				    TX_COLLISION_THRESHOLD + 1);
   9441 				log(LOG_WARNING, "%s: excessive collisions\n",
   9442 				    device_xname(sc->sc_dev));
   9443 			}
   9444 		} else
   9445 			if_statinc(ifp, if_opackets);
   9446 
   9447 		txq->txq_packets++;
   9448 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9449 
   9450 		txq->txq_free += txs->txs_ndesc;
   9451 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9452 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9453 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9454 		m_freem(txs->txs_mbuf);
   9455 		txs->txs_mbuf = NULL;
   9456 	}
   9457 
   9458 	/* Update the dirty transmit buffer pointer. */
   9459 	txq->txq_sdirty = i;
   9460 	DPRINTF(sc, WM_DEBUG_TX,
   9461 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9462 
   9463 	if (count != 0)
   9464 		rnd_add_uint32(&sc->rnd_source, count);
   9465 
   9466 	/*
   9467 	 * If there are no more pending transmissions, cancel the watchdog
   9468 	 * timer.
   9469 	 */
   9470 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9471 		txq->txq_sending = false;
   9472 
   9473 	return more;
   9474 }
   9475 
   9476 static inline uint32_t
   9477 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9478 {
   9479 	struct wm_softc *sc = rxq->rxq_sc;
   9480 
   9481 	if (sc->sc_type == WM_T_82574)
   9482 		return EXTRXC_STATUS(
   9483 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9484 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9485 		return NQRXC_STATUS(
   9486 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9487 	else
   9488 		return rxq->rxq_descs[idx].wrx_status;
   9489 }
   9490 
   9491 static inline uint32_t
   9492 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9493 {
   9494 	struct wm_softc *sc = rxq->rxq_sc;
   9495 
   9496 	if (sc->sc_type == WM_T_82574)
   9497 		return EXTRXC_ERROR(
   9498 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9499 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9500 		return NQRXC_ERROR(
   9501 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9502 	else
   9503 		return rxq->rxq_descs[idx].wrx_errors;
   9504 }
   9505 
   9506 static inline uint16_t
   9507 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9508 {
   9509 	struct wm_softc *sc = rxq->rxq_sc;
   9510 
   9511 	if (sc->sc_type == WM_T_82574)
   9512 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9513 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9514 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9515 	else
   9516 		return rxq->rxq_descs[idx].wrx_special;
   9517 }
   9518 
   9519 static inline int
   9520 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9521 {
   9522 	struct wm_softc *sc = rxq->rxq_sc;
   9523 
   9524 	if (sc->sc_type == WM_T_82574)
   9525 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9526 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9527 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9528 	else
   9529 		return rxq->rxq_descs[idx].wrx_len;
   9530 }
   9531 
   9532 #ifdef WM_DEBUG
   9533 static inline uint32_t
   9534 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9535 {
   9536 	struct wm_softc *sc = rxq->rxq_sc;
   9537 
   9538 	if (sc->sc_type == WM_T_82574)
   9539 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9540 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9541 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9542 	else
   9543 		return 0;
   9544 }
   9545 
   9546 static inline uint8_t
   9547 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9548 {
   9549 	struct wm_softc *sc = rxq->rxq_sc;
   9550 
   9551 	if (sc->sc_type == WM_T_82574)
   9552 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9553 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9554 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9555 	else
   9556 		return 0;
   9557 }
   9558 #endif /* WM_DEBUG */
   9559 
   9560 static inline bool
   9561 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9562     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9563 {
   9564 
   9565 	if (sc->sc_type == WM_T_82574)
   9566 		return (status & ext_bit) != 0;
   9567 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9568 		return (status & nq_bit) != 0;
   9569 	else
   9570 		return (status & legacy_bit) != 0;
   9571 }
   9572 
   9573 static inline bool
   9574 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9575     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9576 {
   9577 
   9578 	if (sc->sc_type == WM_T_82574)
   9579 		return (error & ext_bit) != 0;
   9580 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9581 		return (error & nq_bit) != 0;
   9582 	else
   9583 		return (error & legacy_bit) != 0;
   9584 }
   9585 
   9586 static inline bool
   9587 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9588 {
   9589 
   9590 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9591 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9592 		return true;
   9593 	else
   9594 		return false;
   9595 }
   9596 
   9597 static inline bool
   9598 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9599 {
   9600 	struct wm_softc *sc = rxq->rxq_sc;
   9601 
   9602 	/* XXX missing error bit for newqueue? */
   9603 	if (wm_rxdesc_is_set_error(sc, errors,
   9604 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9605 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9606 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9607 		NQRXC_ERROR_RXE)) {
   9608 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9609 		    EXTRXC_ERROR_SE, 0))
   9610 			log(LOG_WARNING, "%s: symbol error\n",
   9611 			    device_xname(sc->sc_dev));
   9612 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9613 		    EXTRXC_ERROR_SEQ, 0))
   9614 			log(LOG_WARNING, "%s: receive sequence error\n",
   9615 			    device_xname(sc->sc_dev));
   9616 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9617 		    EXTRXC_ERROR_CE, 0))
   9618 			log(LOG_WARNING, "%s: CRC error\n",
   9619 			    device_xname(sc->sc_dev));
   9620 		return true;
   9621 	}
   9622 
   9623 	return false;
   9624 }
   9625 
   9626 static inline bool
   9627 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9628 {
   9629 	struct wm_softc *sc = rxq->rxq_sc;
   9630 
   9631 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9632 		NQRXC_STATUS_DD)) {
   9633 		/* We have processed all of the receive descriptors. */
   9634 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9635 		return false;
   9636 	}
   9637 
   9638 	return true;
   9639 }
   9640 
   9641 static inline bool
   9642 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9643     uint16_t vlantag, struct mbuf *m)
   9644 {
   9645 
   9646 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9647 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9648 		vlan_set_tag(m, le16toh(vlantag));
   9649 	}
   9650 
   9651 	return true;
   9652 }
   9653 
   9654 static inline void
   9655 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9656     uint32_t errors, struct mbuf *m)
   9657 {
   9658 	struct wm_softc *sc = rxq->rxq_sc;
   9659 
   9660 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9661 		if (wm_rxdesc_is_set_status(sc, status,
   9662 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9663 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9664 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9665 			if (wm_rxdesc_is_set_error(sc, errors,
   9666 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9667 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9668 		}
   9669 		if (wm_rxdesc_is_set_status(sc, status,
   9670 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9671 			/*
   9672 			 * Note: we don't know if this was TCP or UDP,
   9673 			 * so we just set both bits, and expect the
   9674 			 * upper layers to deal.
   9675 			 */
   9676 			WM_Q_EVCNT_INCR(rxq, tusum);
   9677 			m->m_pkthdr.csum_flags |=
   9678 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9679 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9680 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9681 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9682 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9683 		}
   9684 	}
   9685 }
   9686 
   9687 /*
   9688  * wm_rxeof:
   9689  *
   9690  *	Helper; handle receive interrupts.
   9691  */
   9692 static bool
   9693 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9694 {
   9695 	struct wm_softc *sc = rxq->rxq_sc;
   9696 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9697 	struct wm_rxsoft *rxs;
   9698 	struct mbuf *m;
   9699 	int i, len;
   9700 	int count = 0;
   9701 	uint32_t status, errors;
   9702 	uint16_t vlantag;
   9703 	bool more = false;
   9704 
   9705 	KASSERT(mutex_owned(rxq->rxq_lock));
   9706 
   9707 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9708 		rxs = &rxq->rxq_soft[i];
   9709 
   9710 		DPRINTF(sc, WM_DEBUG_RX,
   9711 		    ("%s: RX: checking descriptor %d\n",
   9712 			device_xname(sc->sc_dev), i));
   9713 		wm_cdrxsync(rxq, i,
   9714 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9715 
   9716 		status = wm_rxdesc_get_status(rxq, i);
   9717 		errors = wm_rxdesc_get_errors(rxq, i);
   9718 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9719 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9720 #ifdef WM_DEBUG
   9721 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9722 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9723 #endif
   9724 
   9725 		if (!wm_rxdesc_dd(rxq, i, status))
   9726 			break;
   9727 
   9728 		if (limit-- == 0) {
   9729 			more = true;
   9730 			DPRINTF(sc, WM_DEBUG_RX,
   9731 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9732 				device_xname(sc->sc_dev), i));
   9733 			break;
   9734 		}
   9735 
   9736 		count++;
   9737 		if (__predict_false(rxq->rxq_discard)) {
   9738 			DPRINTF(sc, WM_DEBUG_RX,
   9739 			    ("%s: RX: discarding contents of descriptor %d\n",
   9740 				device_xname(sc->sc_dev), i));
   9741 			wm_init_rxdesc(rxq, i);
   9742 			if (wm_rxdesc_is_eop(rxq, status)) {
   9743 				/* Reset our state. */
   9744 				DPRINTF(sc, WM_DEBUG_RX,
   9745 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9746 					device_xname(sc->sc_dev)));
   9747 				rxq->rxq_discard = 0;
   9748 			}
   9749 			continue;
   9750 		}
   9751 
   9752 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9753 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9754 
   9755 		m = rxs->rxs_mbuf;
   9756 
   9757 		/*
   9758 		 * Add a new receive buffer to the ring, unless of
   9759 		 * course the length is zero. Treat the latter as a
   9760 		 * failed mapping.
   9761 		 */
   9762 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9763 			/*
   9764 			 * Failed, throw away what we've done so
   9765 			 * far, and discard the rest of the packet.
   9766 			 */
   9767 			if_statinc(ifp, if_ierrors);
   9768 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9769 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9770 			wm_init_rxdesc(rxq, i);
   9771 			if (!wm_rxdesc_is_eop(rxq, status))
   9772 				rxq->rxq_discard = 1;
   9773 			if (rxq->rxq_head != NULL)
   9774 				m_freem(rxq->rxq_head);
   9775 			WM_RXCHAIN_RESET(rxq);
   9776 			DPRINTF(sc, WM_DEBUG_RX,
   9777 			    ("%s: RX: Rx buffer allocation failed, "
   9778 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9779 				rxq->rxq_discard ? " (discard)" : ""));
   9780 			continue;
   9781 		}
   9782 
   9783 		m->m_len = len;
   9784 		rxq->rxq_len += len;
   9785 		DPRINTF(sc, WM_DEBUG_RX,
   9786 		    ("%s: RX: buffer at %p len %d\n",
   9787 			device_xname(sc->sc_dev), m->m_data, len));
   9788 
   9789 		/* If this is not the end of the packet, keep looking. */
   9790 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9791 			WM_RXCHAIN_LINK(rxq, m);
   9792 			DPRINTF(sc, WM_DEBUG_RX,
   9793 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9794 				device_xname(sc->sc_dev), rxq->rxq_len));
   9795 			continue;
   9796 		}
   9797 
   9798 		/*
   9799 		 * Okay, we have the entire packet now. The chip is
   9800 		 * configured to include the FCS except I35[04], I21[01].
   9801 		 * (not all chips can be configured to strip it), so we need
   9802 		 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
   9803 		 * in RCTL register is always set, so we don't trim it.
   9804 		 * PCH2 and newer chip also not include FCS when jumbo
   9805 		 * frame is used to do workaround an errata.
   9806 		 * May need to adjust length of previous mbuf in the
   9807 		 * chain if the current mbuf is too short.
   9808 		 */
   9809 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9810 			if (m->m_len < ETHER_CRC_LEN) {
   9811 				rxq->rxq_tail->m_len
   9812 				    -= (ETHER_CRC_LEN - m->m_len);
   9813 				m->m_len = 0;
   9814 			} else
   9815 				m->m_len -= ETHER_CRC_LEN;
   9816 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9817 		} else
   9818 			len = rxq->rxq_len;
   9819 
   9820 		WM_RXCHAIN_LINK(rxq, m);
   9821 
   9822 		*rxq->rxq_tailp = NULL;
   9823 		m = rxq->rxq_head;
   9824 
   9825 		WM_RXCHAIN_RESET(rxq);
   9826 
   9827 		DPRINTF(sc, WM_DEBUG_RX,
   9828 		    ("%s: RX: have entire packet, len -> %d\n",
   9829 			device_xname(sc->sc_dev), len));
   9830 
   9831 		/* If an error occurred, update stats and drop the packet. */
   9832 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9833 			m_freem(m);
   9834 			continue;
   9835 		}
   9836 
   9837 		/* No errors.  Receive the packet. */
   9838 		m_set_rcvif(m, ifp);
   9839 		m->m_pkthdr.len = len;
   9840 		/*
   9841 		 * TODO
   9842 		 * should be save rsshash and rsstype to this mbuf.
   9843 		 */
   9844 		DPRINTF(sc, WM_DEBUG_RX,
   9845 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9846 			device_xname(sc->sc_dev), rsstype, rsshash));
   9847 
   9848 		/*
   9849 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9850 		 * for us.  Associate the tag with the packet.
   9851 		 */
   9852 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9853 			continue;
   9854 
   9855 		/* Set up checksum info for this packet. */
   9856 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9857 
   9858 		rxq->rxq_packets++;
   9859 		rxq->rxq_bytes += len;
   9860 		/* Pass it on. */
   9861 		if_percpuq_enqueue(sc->sc_ipq, m);
   9862 
   9863 		if (rxq->rxq_stopping)
   9864 			break;
   9865 	}
   9866 	rxq->rxq_ptr = i;
   9867 
   9868 	if (count != 0)
   9869 		rnd_add_uint32(&sc->rnd_source, count);
   9870 
   9871 	DPRINTF(sc, WM_DEBUG_RX,
   9872 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9873 
   9874 	return more;
   9875 }
   9876 
   9877 /*
   9878  * wm_linkintr_gmii:
   9879  *
   9880  *	Helper; handle link interrupts for GMII.
   9881  */
   9882 static void
   9883 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9884 {
   9885 	device_t dev = sc->sc_dev;
   9886 	uint32_t status, reg;
   9887 	bool link;
   9888 	int rv;
   9889 
   9890 	KASSERT(mutex_owned(sc->sc_core_lock));
   9891 
   9892 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9893 		__func__));
   9894 
   9895 	if ((icr & ICR_LSC) == 0) {
   9896 		if (icr & ICR_RXSEQ)
   9897 			DPRINTF(sc, WM_DEBUG_LINK,
   9898 			    ("%s: LINK Receive sequence error\n",
   9899 				device_xname(dev)));
   9900 		return;
   9901 	}
   9902 
   9903 	/* Link status changed */
   9904 	status = CSR_READ(sc, WMREG_STATUS);
   9905 	link = status & STATUS_LU;
   9906 	if (link) {
   9907 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9908 			device_xname(dev),
   9909 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9910 		if (wm_phy_need_linkdown_discard(sc)) {
   9911 			DPRINTF(sc, WM_DEBUG_LINK,
   9912 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9913 				device_xname(dev)));
   9914 			wm_clear_linkdown_discard(sc);
   9915 		}
   9916 	} else {
   9917 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9918 			device_xname(dev)));
   9919 		if (wm_phy_need_linkdown_discard(sc)) {
   9920 			DPRINTF(sc, WM_DEBUG_LINK,
   9921 			    ("%s: linkintr: Set linkdown discard flag\n",
   9922 				device_xname(dev)));
   9923 			wm_set_linkdown_discard(sc);
   9924 		}
   9925 	}
   9926 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9927 		wm_gig_downshift_workaround_ich8lan(sc);
   9928 
   9929 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9930 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9931 
   9932 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9933 		device_xname(dev)));
   9934 	mii_pollstat(&sc->sc_mii);
   9935 	if (sc->sc_type == WM_T_82543) {
   9936 		int miistatus, active;
   9937 
   9938 		/*
   9939 		 * With 82543, we need to force speed and
   9940 		 * duplex on the MAC equal to what the PHY
   9941 		 * speed and duplex configuration is.
   9942 		 */
   9943 		miistatus = sc->sc_mii.mii_media_status;
   9944 
   9945 		if (miistatus & IFM_ACTIVE) {
   9946 			active = sc->sc_mii.mii_media_active;
   9947 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9948 			switch (IFM_SUBTYPE(active)) {
   9949 			case IFM_10_T:
   9950 				sc->sc_ctrl |= CTRL_SPEED_10;
   9951 				break;
   9952 			case IFM_100_TX:
   9953 				sc->sc_ctrl |= CTRL_SPEED_100;
   9954 				break;
   9955 			case IFM_1000_T:
   9956 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9957 				break;
   9958 			default:
   9959 				/*
   9960 				 * Fiber?
   9961 				 * Shoud not enter here.
   9962 				 */
   9963 				device_printf(dev, "unknown media (%x)\n",
   9964 				    active);
   9965 				break;
   9966 			}
   9967 			if (active & IFM_FDX)
   9968 				sc->sc_ctrl |= CTRL_FD;
   9969 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9970 		}
   9971 	} else if (sc->sc_type == WM_T_PCH) {
   9972 		wm_k1_gig_workaround_hv(sc,
   9973 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9974 	}
   9975 
   9976 	/*
   9977 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9978 	 * aggressive resulting in many collisions. To avoid this, increase
   9979 	 * the IPG and reduce Rx latency in the PHY.
   9980 	 */
   9981 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9982 	    && link) {
   9983 		uint32_t tipg_reg;
   9984 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9985 		bool fdx;
   9986 		uint16_t emi_addr, emi_val;
   9987 
   9988 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9989 		tipg_reg &= ~TIPG_IPGT_MASK;
   9990 		fdx = status & STATUS_FD;
   9991 
   9992 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9993 			tipg_reg |= 0xff;
   9994 			/* Reduce Rx latency in analog PHY */
   9995 			emi_val = 0;
   9996 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9997 		    fdx && speed != STATUS_SPEED_1000) {
   9998 			tipg_reg |= 0xc;
   9999 			emi_val = 1;
   10000 		} else {
   10001 			/* Roll back the default values */
   10002 			tipg_reg |= 0x08;
   10003 			emi_val = 1;
   10004 		}
   10005 
   10006 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10007 
   10008 		rv = sc->phy.acquire(sc);
   10009 		if (rv)
   10010 			return;
   10011 
   10012 		if (sc->sc_type == WM_T_PCH2)
   10013 			emi_addr = I82579_RX_CONFIG;
   10014 		else
   10015 			emi_addr = I217_RX_CONFIG;
   10016 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10017 
   10018 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10019 			uint16_t phy_reg;
   10020 
   10021 			sc->phy.readreg_locked(dev, 2,
   10022 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10023 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10024 			if (speed == STATUS_SPEED_100
   10025 			    || speed == STATUS_SPEED_10)
   10026 				phy_reg |= 0x3e8;
   10027 			else
   10028 				phy_reg |= 0xfa;
   10029 			sc->phy.writereg_locked(dev, 2,
   10030 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10031 
   10032 			if (speed == STATUS_SPEED_1000) {
   10033 				sc->phy.readreg_locked(dev, 2,
   10034 				    HV_PM_CTRL, &phy_reg);
   10035 
   10036 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10037 
   10038 				sc->phy.writereg_locked(dev, 2,
   10039 				    HV_PM_CTRL, phy_reg);
   10040 			}
   10041 		}
   10042 		sc->phy.release(sc);
   10043 
   10044 		if (rv)
   10045 			return;
   10046 
   10047 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10048 			uint16_t data, ptr_gap;
   10049 
   10050 			if (speed == STATUS_SPEED_1000) {
   10051 				rv = sc->phy.acquire(sc);
   10052 				if (rv)
   10053 					return;
   10054 
   10055 				rv = sc->phy.readreg_locked(dev, 2,
   10056 				    I82579_UNKNOWN1, &data);
   10057 				if (rv) {
   10058 					sc->phy.release(sc);
   10059 					return;
   10060 				}
   10061 
   10062 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10063 				if (ptr_gap < 0x18) {
   10064 					data &= ~(0x3ff << 2);
   10065 					data |= (0x18 << 2);
   10066 					rv = sc->phy.writereg_locked(dev,
   10067 					    2, I82579_UNKNOWN1, data);
   10068 				}
   10069 				sc->phy.release(sc);
   10070 				if (rv)
   10071 					return;
   10072 			} else {
   10073 				rv = sc->phy.acquire(sc);
   10074 				if (rv)
   10075 					return;
   10076 
   10077 				rv = sc->phy.writereg_locked(dev, 2,
   10078 				    I82579_UNKNOWN1, 0xc023);
   10079 				sc->phy.release(sc);
   10080 				if (rv)
   10081 					return;
   10082 
   10083 			}
   10084 		}
   10085 	}
   10086 
   10087 	/*
   10088 	 * I217 Packet Loss issue:
   10089 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10090 	 * on power up.
   10091 	 * Set the Beacon Duration for I217 to 8 usec
   10092 	 */
   10093 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10094 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10095 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10096 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10097 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10098 	}
   10099 
   10100 	/* Work-around I218 hang issue */
   10101 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10102 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10103 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10104 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10105 		wm_k1_workaround_lpt_lp(sc, link);
   10106 
   10107 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10108 		/*
   10109 		 * Set platform power management values for Latency
   10110 		 * Tolerance Reporting (LTR)
   10111 		 */
   10112 		wm_platform_pm_pch_lpt(sc,
   10113 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10114 	}
   10115 
   10116 	/* Clear link partner's EEE ability */
   10117 	sc->eee_lp_ability = 0;
   10118 
   10119 	/* FEXTNVM6 K1-off workaround */
   10120 	if (sc->sc_type == WM_T_PCH_SPT) {
   10121 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10122 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10123 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10124 		else
   10125 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10126 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10127 	}
   10128 
   10129 	if (!link)
   10130 		return;
   10131 
   10132 	switch (sc->sc_type) {
   10133 	case WM_T_PCH2:
   10134 		wm_k1_workaround_lv(sc);
   10135 		/* FALLTHROUGH */
   10136 	case WM_T_PCH:
   10137 		if (sc->sc_phytype == WMPHY_82578)
   10138 			wm_link_stall_workaround_hv(sc);
   10139 		break;
   10140 	default:
   10141 		break;
   10142 	}
   10143 
   10144 	/* Enable/Disable EEE after link up */
   10145 	if (sc->sc_phytype > WMPHY_82579)
   10146 		wm_set_eee_pchlan(sc);
   10147 }
   10148 
   10149 /*
   10150  * wm_linkintr_tbi:
   10151  *
   10152  *	Helper; handle link interrupts for TBI mode.
   10153  */
   10154 static void
   10155 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10156 {
   10157 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10158 	uint32_t status;
   10159 
   10160 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10161 		__func__));
   10162 
   10163 	status = CSR_READ(sc, WMREG_STATUS);
   10164 	if (icr & ICR_LSC) {
   10165 		wm_check_for_link(sc);
   10166 		if (status & STATUS_LU) {
   10167 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10168 				device_xname(sc->sc_dev),
   10169 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10170 			/*
   10171 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10172 			 * so we should update sc->sc_ctrl
   10173 			 */
   10174 
   10175 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10176 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10177 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10178 			if (status & STATUS_FD)
   10179 				sc->sc_tctl |=
   10180 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10181 			else
   10182 				sc->sc_tctl |=
   10183 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10184 			if (sc->sc_ctrl & CTRL_TFCE)
   10185 				sc->sc_fcrtl |= FCRTL_XONE;
   10186 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10187 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10188 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10189 			sc->sc_tbi_linkup = 1;
   10190 			if_link_state_change(ifp, LINK_STATE_UP);
   10191 		} else {
   10192 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10193 				device_xname(sc->sc_dev)));
   10194 			sc->sc_tbi_linkup = 0;
   10195 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10196 		}
   10197 		/* Update LED */
   10198 		wm_tbi_serdes_set_linkled(sc);
   10199 	} else if (icr & ICR_RXSEQ)
   10200 		DPRINTF(sc, WM_DEBUG_LINK,
   10201 		    ("%s: LINK: Receive sequence error\n",
   10202 			device_xname(sc->sc_dev)));
   10203 }
   10204 
   10205 /*
   10206  * wm_linkintr_serdes:
   10207  *
   10208  *	Helper; handle link interrupts for TBI mode.
   10209  */
   10210 static void
   10211 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10212 {
   10213 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10214 	struct mii_data *mii = &sc->sc_mii;
   10215 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10216 	uint32_t pcs_adv, pcs_lpab, reg;
   10217 
   10218 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10219 		__func__));
   10220 
   10221 	if (icr & ICR_LSC) {
   10222 		/* Check PCS */
   10223 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10224 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10225 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10226 				device_xname(sc->sc_dev)));
   10227 			mii->mii_media_status |= IFM_ACTIVE;
   10228 			sc->sc_tbi_linkup = 1;
   10229 			if_link_state_change(ifp, LINK_STATE_UP);
   10230 		} else {
   10231 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10232 				device_xname(sc->sc_dev)));
   10233 			mii->mii_media_status |= IFM_NONE;
   10234 			sc->sc_tbi_linkup = 0;
   10235 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10236 			wm_tbi_serdes_set_linkled(sc);
   10237 			return;
   10238 		}
   10239 		mii->mii_media_active |= IFM_1000_SX;
   10240 		if ((reg & PCS_LSTS_FDX) != 0)
   10241 			mii->mii_media_active |= IFM_FDX;
   10242 		else
   10243 			mii->mii_media_active |= IFM_HDX;
   10244 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10245 			/* Check flow */
   10246 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10247 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10248 				DPRINTF(sc, WM_DEBUG_LINK,
   10249 				    ("XXX LINKOK but not ACOMP\n"));
   10250 				return;
   10251 			}
   10252 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10253 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10254 			DPRINTF(sc, WM_DEBUG_LINK,
   10255 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10256 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10257 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10258 				mii->mii_media_active |= IFM_FLOW
   10259 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10260 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10261 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10262 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10263 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10264 				mii->mii_media_active |= IFM_FLOW
   10265 				    | IFM_ETH_TXPAUSE;
   10266 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10267 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10268 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10269 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10270 				mii->mii_media_active |= IFM_FLOW
   10271 				    | IFM_ETH_RXPAUSE;
   10272 		}
   10273 		/* Update LED */
   10274 		wm_tbi_serdes_set_linkled(sc);
   10275 	} else
   10276 		DPRINTF(sc, WM_DEBUG_LINK,
   10277 		    ("%s: LINK: Receive sequence error\n",
   10278 		    device_xname(sc->sc_dev)));
   10279 }
   10280 
   10281 /*
   10282  * wm_linkintr:
   10283  *
   10284  *	Helper; handle link interrupts.
   10285  */
   10286 static void
   10287 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10288 {
   10289 
   10290 	KASSERT(mutex_owned(sc->sc_core_lock));
   10291 
   10292 	if (sc->sc_flags & WM_F_HAS_MII)
   10293 		wm_linkintr_gmii(sc, icr);
   10294 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10295 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10296 		wm_linkintr_serdes(sc, icr);
   10297 	else
   10298 		wm_linkintr_tbi(sc, icr);
   10299 }
   10300 
   10301 
   10302 static inline void
   10303 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10304 {
   10305 
   10306 	if (wmq->wmq_txrx_use_workqueue)
   10307 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10308 	else
   10309 		softint_schedule(wmq->wmq_si);
   10310 }
   10311 
   10312 static inline void
   10313 wm_legacy_intr_disable(struct wm_softc *sc)
   10314 {
   10315 
   10316 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10317 }
   10318 
   10319 static inline void
   10320 wm_legacy_intr_enable(struct wm_softc *sc)
   10321 {
   10322 
   10323 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10324 }
   10325 
   10326 /*
   10327  * wm_intr_legacy:
   10328  *
   10329  *	Interrupt service routine for INTx and MSI.
   10330  */
   10331 static int
   10332 wm_intr_legacy(void *arg)
   10333 {
   10334 	struct wm_softc *sc = arg;
   10335 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10336 	struct wm_queue *wmq = &sc->sc_queue[0];
   10337 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10338 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10339 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10340 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10341 	uint32_t icr, rndval = 0;
   10342 	bool more = false;
   10343 
   10344 	icr = CSR_READ(sc, WMREG_ICR);
   10345 	if ((icr & sc->sc_icr) == 0)
   10346 		return 0;
   10347 
   10348 	DPRINTF(sc, WM_DEBUG_TX,
   10349 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   10350 	if (rndval == 0)
   10351 		rndval = icr;
   10352 
   10353 	mutex_enter(txq->txq_lock);
   10354 
   10355 	if (txq->txq_stopping) {
   10356 		mutex_exit(txq->txq_lock);
   10357 		return 1;
   10358 	}
   10359 
   10360 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10361 	if (icr & ICR_TXDW) {
   10362 		DPRINTF(sc, WM_DEBUG_TX,
   10363 		    ("%s: TX: got TXDW interrupt\n",
   10364 			device_xname(sc->sc_dev)));
   10365 		WM_Q_EVCNT_INCR(txq, txdw);
   10366 	}
   10367 #endif
   10368 	if (txlimit > 0) {
   10369 		more |= wm_txeof(txq, txlimit);
   10370 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10371 			more = true;
   10372 	} else
   10373 		more = true;
   10374 	mutex_exit(txq->txq_lock);
   10375 
   10376 	mutex_enter(rxq->rxq_lock);
   10377 
   10378 	if (rxq->rxq_stopping) {
   10379 		mutex_exit(rxq->rxq_lock);
   10380 		return 1;
   10381 	}
   10382 
   10383 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10384 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10385 		DPRINTF(sc, WM_DEBUG_RX,
   10386 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10387 			device_xname(sc->sc_dev),
   10388 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10389 		WM_Q_EVCNT_INCR(rxq, intr);
   10390 	}
   10391 #endif
   10392 	if (rxlimit > 0) {
   10393 		/*
   10394 		 * wm_rxeof() does *not* call upper layer functions directly,
   10395 		 * as if_percpuq_enqueue() just call softint_schedule().
   10396 		 * So, we can call wm_rxeof() in interrupt context.
   10397 		 */
   10398 		more = wm_rxeof(rxq, rxlimit);
   10399 	} else
   10400 		more = true;
   10401 
   10402 	mutex_exit(rxq->rxq_lock);
   10403 
   10404 	mutex_enter(sc->sc_core_lock);
   10405 
   10406 	if (sc->sc_core_stopping) {
   10407 		mutex_exit(sc->sc_core_lock);
   10408 		return 1;
   10409 	}
   10410 
   10411 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10412 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10413 		wm_linkintr(sc, icr);
   10414 	}
   10415 	if ((icr & ICR_GPI(0)) != 0)
   10416 		device_printf(sc->sc_dev, "got module interrupt\n");
   10417 
   10418 	mutex_exit(sc->sc_core_lock);
   10419 
   10420 	if (icr & ICR_RXO) {
   10421 #if defined(WM_DEBUG)
   10422 		log(LOG_WARNING, "%s: Receive overrun\n",
   10423 		    device_xname(sc->sc_dev));
   10424 #endif /* defined(WM_DEBUG) */
   10425 	}
   10426 
   10427 	rnd_add_uint32(&sc->rnd_source, rndval);
   10428 
   10429 	if (more) {
   10430 		/* Try to get more packets going. */
   10431 		wm_legacy_intr_disable(sc);
   10432 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10433 		wm_sched_handle_queue(sc, wmq);
   10434 	}
   10435 
   10436 	return 1;
   10437 }
   10438 
   10439 static inline void
   10440 wm_txrxintr_disable(struct wm_queue *wmq)
   10441 {
   10442 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10443 
   10444 	if (__predict_false(!wm_is_using_msix(sc))) {
   10445 		wm_legacy_intr_disable(sc);
   10446 		return;
   10447 	}
   10448 
   10449 	if (sc->sc_type == WM_T_82574)
   10450 		CSR_WRITE(sc, WMREG_IMC,
   10451 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10452 	else if (sc->sc_type == WM_T_82575)
   10453 		CSR_WRITE(sc, WMREG_EIMC,
   10454 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10455 	else
   10456 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10457 }
   10458 
   10459 static inline void
   10460 wm_txrxintr_enable(struct wm_queue *wmq)
   10461 {
   10462 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10463 
   10464 	wm_itrs_calculate(sc, wmq);
   10465 
   10466 	if (__predict_false(!wm_is_using_msix(sc))) {
   10467 		wm_legacy_intr_enable(sc);
   10468 		return;
   10469 	}
   10470 
   10471 	/*
   10472 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
   10473 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
   10474 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
   10475 	 * while each wm_handle_queue(wmq) is runnig.
   10476 	 */
   10477 	if (sc->sc_type == WM_T_82574)
   10478 		CSR_WRITE(sc, WMREG_IMS,
   10479 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10480 	else if (sc->sc_type == WM_T_82575)
   10481 		CSR_WRITE(sc, WMREG_EIMS,
   10482 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10483 	else
   10484 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10485 }
   10486 
   10487 static int
   10488 wm_txrxintr_msix(void *arg)
   10489 {
   10490 	struct wm_queue *wmq = arg;
   10491 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10492 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10493 	struct wm_softc *sc = txq->txq_sc;
   10494 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10495 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10496 	bool txmore;
   10497 	bool rxmore;
   10498 
   10499 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10500 
   10501 	DPRINTF(sc, WM_DEBUG_TX,
   10502 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10503 
   10504 	wm_txrxintr_disable(wmq);
   10505 
   10506 	mutex_enter(txq->txq_lock);
   10507 
   10508 	if (txq->txq_stopping) {
   10509 		mutex_exit(txq->txq_lock);
   10510 		return 1;
   10511 	}
   10512 
   10513 	WM_Q_EVCNT_INCR(txq, txdw);
   10514 	if (txlimit > 0) {
   10515 		txmore = wm_txeof(txq, txlimit);
   10516 		/* wm_deferred start() is done in wm_handle_queue(). */
   10517 	} else
   10518 		txmore = true;
   10519 	mutex_exit(txq->txq_lock);
   10520 
   10521 	DPRINTF(sc, WM_DEBUG_RX,
   10522 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10523 	mutex_enter(rxq->rxq_lock);
   10524 
   10525 	if (rxq->rxq_stopping) {
   10526 		mutex_exit(rxq->rxq_lock);
   10527 		return 1;
   10528 	}
   10529 
   10530 	WM_Q_EVCNT_INCR(rxq, intr);
   10531 	if (rxlimit > 0) {
   10532 		rxmore = wm_rxeof(rxq, rxlimit);
   10533 	} else
   10534 		rxmore = true;
   10535 	mutex_exit(rxq->rxq_lock);
   10536 
   10537 	wm_itrs_writereg(sc, wmq);
   10538 
   10539 	if (txmore || rxmore) {
   10540 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10541 		wm_sched_handle_queue(sc, wmq);
   10542 	} else
   10543 		wm_txrxintr_enable(wmq);
   10544 
   10545 	return 1;
   10546 }
   10547 
   10548 static void
   10549 wm_handle_queue(void *arg)
   10550 {
   10551 	struct wm_queue *wmq = arg;
   10552 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10553 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10554 	struct wm_softc *sc = txq->txq_sc;
   10555 	u_int txlimit = sc->sc_tx_process_limit;
   10556 	u_int rxlimit = sc->sc_rx_process_limit;
   10557 	bool txmore;
   10558 	bool rxmore;
   10559 
   10560 	mutex_enter(txq->txq_lock);
   10561 	if (txq->txq_stopping) {
   10562 		mutex_exit(txq->txq_lock);
   10563 		return;
   10564 	}
   10565 	txmore = wm_txeof(txq, txlimit);
   10566 	wm_deferred_start_locked(txq);
   10567 	mutex_exit(txq->txq_lock);
   10568 
   10569 	mutex_enter(rxq->rxq_lock);
   10570 	if (rxq->rxq_stopping) {
   10571 		mutex_exit(rxq->rxq_lock);
   10572 		return;
   10573 	}
   10574 	WM_Q_EVCNT_INCR(rxq, defer);
   10575 	rxmore = wm_rxeof(rxq, rxlimit);
   10576 	mutex_exit(rxq->rxq_lock);
   10577 
   10578 	if (txmore || rxmore) {
   10579 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10580 		wm_sched_handle_queue(sc, wmq);
   10581 	} else
   10582 		wm_txrxintr_enable(wmq);
   10583 }
   10584 
   10585 static void
   10586 wm_handle_queue_work(struct work *wk, void *context)
   10587 {
   10588 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10589 
   10590 	/*
   10591 	 * "enqueued flag" is not required here.
   10592 	 */
   10593 	wm_handle_queue(wmq);
   10594 }
   10595 
   10596 /*
   10597  * wm_linkintr_msix:
   10598  *
   10599  *	Interrupt service routine for link status change for MSI-X.
   10600  */
   10601 static int
   10602 wm_linkintr_msix(void *arg)
   10603 {
   10604 	struct wm_softc *sc = arg;
   10605 	uint32_t reg;
   10606 	bool has_rxo;
   10607 
   10608 	reg = CSR_READ(sc, WMREG_ICR);
   10609 	mutex_enter(sc->sc_core_lock);
   10610 	DPRINTF(sc, WM_DEBUG_LINK,
   10611 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10612 		device_xname(sc->sc_dev), reg));
   10613 
   10614 	if (sc->sc_core_stopping)
   10615 		goto out;
   10616 
   10617 	if ((reg & ICR_LSC) != 0) {
   10618 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10619 		wm_linkintr(sc, ICR_LSC);
   10620 	}
   10621 	if ((reg & ICR_GPI(0)) != 0)
   10622 		device_printf(sc->sc_dev, "got module interrupt\n");
   10623 
   10624 	/*
   10625 	 * XXX 82574 MSI-X mode workaround
   10626 	 *
   10627 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   10628 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   10629 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   10630 	 * interrupts by writing WMREG_ICS to process receive packets.
   10631 	 */
   10632 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10633 #if defined(WM_DEBUG)
   10634 		log(LOG_WARNING, "%s: Receive overrun\n",
   10635 		    device_xname(sc->sc_dev));
   10636 #endif /* defined(WM_DEBUG) */
   10637 
   10638 		has_rxo = true;
   10639 		/*
   10640 		 * The RXO interrupt is very high rate when receive traffic is
   10641 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   10642 		 * interrupts. ICR_OTHER will be enabled at the end of
   10643 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10644 		 * ICR_RXQ(1) interrupts.
   10645 		 */
   10646 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10647 
   10648 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10649 	}
   10650 
   10651 
   10652 
   10653 out:
   10654 	mutex_exit(sc->sc_core_lock);
   10655 
   10656 	if (sc->sc_type == WM_T_82574) {
   10657 		if (!has_rxo)
   10658 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10659 		else
   10660 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10661 	} else if (sc->sc_type == WM_T_82575)
   10662 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10663 	else
   10664 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10665 
   10666 	return 1;
   10667 }
   10668 
   10669 /*
   10670  * Media related.
   10671  * GMII, SGMII, TBI (and SERDES)
   10672  */
   10673 
   10674 /* Common */
   10675 
   10676 /*
   10677  * wm_tbi_serdes_set_linkled:
   10678  *
   10679  *	Update the link LED on TBI and SERDES devices.
   10680  */
   10681 static void
   10682 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10683 {
   10684 
   10685 	if (sc->sc_tbi_linkup)
   10686 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10687 	else
   10688 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10689 
   10690 	/* 82540 or newer devices are active low */
   10691 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10692 
   10693 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10694 }
   10695 
   10696 /* GMII related */
   10697 
   10698 /*
   10699  * wm_gmii_reset:
   10700  *
   10701  *	Reset the PHY.
   10702  */
   10703 static void
   10704 wm_gmii_reset(struct wm_softc *sc)
   10705 {
   10706 	uint32_t reg;
   10707 	int rv;
   10708 
   10709 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10710 		device_xname(sc->sc_dev), __func__));
   10711 
   10712 	rv = sc->phy.acquire(sc);
   10713 	if (rv != 0) {
   10714 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10715 		    __func__);
   10716 		return;
   10717 	}
   10718 
   10719 	switch (sc->sc_type) {
   10720 	case WM_T_82542_2_0:
   10721 	case WM_T_82542_2_1:
   10722 		/* null */
   10723 		break;
   10724 	case WM_T_82543:
   10725 		/*
   10726 		 * With 82543, we need to force speed and duplex on the MAC
   10727 		 * equal to what the PHY speed and duplex configuration is.
   10728 		 * In addition, we need to perform a hardware reset on the PHY
   10729 		 * to take it out of reset.
   10730 		 */
   10731 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10732 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10733 
   10734 		/* The PHY reset pin is active-low. */
   10735 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10736 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10737 		    CTRL_EXT_SWDPIN(4));
   10738 		reg |= CTRL_EXT_SWDPIO(4);
   10739 
   10740 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10741 		CSR_WRITE_FLUSH(sc);
   10742 		delay(10*1000);
   10743 
   10744 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10745 		CSR_WRITE_FLUSH(sc);
   10746 		delay(150);
   10747 #if 0
   10748 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10749 #endif
   10750 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10751 		break;
   10752 	case WM_T_82544:	/* Reset 10000us */
   10753 	case WM_T_82540:
   10754 	case WM_T_82545:
   10755 	case WM_T_82545_3:
   10756 	case WM_T_82546:
   10757 	case WM_T_82546_3:
   10758 	case WM_T_82541:
   10759 	case WM_T_82541_2:
   10760 	case WM_T_82547:
   10761 	case WM_T_82547_2:
   10762 	case WM_T_82571:	/* Reset 100us */
   10763 	case WM_T_82572:
   10764 	case WM_T_82573:
   10765 	case WM_T_82574:
   10766 	case WM_T_82575:
   10767 	case WM_T_82576:
   10768 	case WM_T_82580:
   10769 	case WM_T_I350:
   10770 	case WM_T_I354:
   10771 	case WM_T_I210:
   10772 	case WM_T_I211:
   10773 	case WM_T_82583:
   10774 	case WM_T_80003:
   10775 		/* Generic reset */
   10776 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10777 		CSR_WRITE_FLUSH(sc);
   10778 		delay(20000);
   10779 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10780 		CSR_WRITE_FLUSH(sc);
   10781 		delay(20000);
   10782 
   10783 		if ((sc->sc_type == WM_T_82541)
   10784 		    || (sc->sc_type == WM_T_82541_2)
   10785 		    || (sc->sc_type == WM_T_82547)
   10786 		    || (sc->sc_type == WM_T_82547_2)) {
   10787 			/* Workaround for igp are done in igp_reset() */
   10788 			/* XXX add code to set LED after phy reset */
   10789 		}
   10790 		break;
   10791 	case WM_T_ICH8:
   10792 	case WM_T_ICH9:
   10793 	case WM_T_ICH10:
   10794 	case WM_T_PCH:
   10795 	case WM_T_PCH2:
   10796 	case WM_T_PCH_LPT:
   10797 	case WM_T_PCH_SPT:
   10798 	case WM_T_PCH_CNP:
   10799 		/* Generic reset */
   10800 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10801 		CSR_WRITE_FLUSH(sc);
   10802 		delay(100);
   10803 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10804 		CSR_WRITE_FLUSH(sc);
   10805 		delay(150);
   10806 		break;
   10807 	default:
   10808 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10809 		    __func__);
   10810 		break;
   10811 	}
   10812 
   10813 	sc->phy.release(sc);
   10814 
   10815 	/* get_cfg_done */
   10816 	wm_get_cfg_done(sc);
   10817 
   10818 	/* Extra setup */
   10819 	switch (sc->sc_type) {
   10820 	case WM_T_82542_2_0:
   10821 	case WM_T_82542_2_1:
   10822 	case WM_T_82543:
   10823 	case WM_T_82544:
   10824 	case WM_T_82540:
   10825 	case WM_T_82545:
   10826 	case WM_T_82545_3:
   10827 	case WM_T_82546:
   10828 	case WM_T_82546_3:
   10829 	case WM_T_82541_2:
   10830 	case WM_T_82547_2:
   10831 	case WM_T_82571:
   10832 	case WM_T_82572:
   10833 	case WM_T_82573:
   10834 	case WM_T_82574:
   10835 	case WM_T_82583:
   10836 	case WM_T_82575:
   10837 	case WM_T_82576:
   10838 	case WM_T_82580:
   10839 	case WM_T_I350:
   10840 	case WM_T_I354:
   10841 	case WM_T_I210:
   10842 	case WM_T_I211:
   10843 	case WM_T_80003:
   10844 		/* Null */
   10845 		break;
   10846 	case WM_T_82541:
   10847 	case WM_T_82547:
   10848 		/* XXX Configure actively LED after PHY reset */
   10849 		break;
   10850 	case WM_T_ICH8:
   10851 	case WM_T_ICH9:
   10852 	case WM_T_ICH10:
   10853 	case WM_T_PCH:
   10854 	case WM_T_PCH2:
   10855 	case WM_T_PCH_LPT:
   10856 	case WM_T_PCH_SPT:
   10857 	case WM_T_PCH_CNP:
   10858 		wm_phy_post_reset(sc);
   10859 		break;
   10860 	default:
   10861 		panic("%s: unknown type\n", __func__);
   10862 		break;
   10863 	}
   10864 }
   10865 
   10866 /*
   10867  * Set up sc_phytype and mii_{read|write}reg.
   10868  *
   10869  *  To identify PHY type, correct read/write function should be selected.
   10870  * To select correct read/write function, PCI ID or MAC type are required
   10871  * without accessing PHY registers.
   10872  *
   10873  *  On the first call of this function, PHY ID is not known yet. Check
   10874  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   10875  * result might be incorrect.
   10876  *
   10877  *  In the second call, PHY OUI and model is used to identify PHY type.
   10878  * It might not be perfect because of the lack of compared entry, but it
   10879  * would be better than the first call.
   10880  *
   10881  *  If the detected new result and previous assumption is different,
   10882  * a diagnostic message will be printed.
   10883  */
   10884 static void
   10885 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10886     uint16_t phy_model)
   10887 {
   10888 	device_t dev = sc->sc_dev;
   10889 	struct mii_data *mii = &sc->sc_mii;
   10890 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10891 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10892 	mii_readreg_t new_readreg;
   10893 	mii_writereg_t new_writereg;
   10894 	bool dodiag = true;
   10895 
   10896 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10897 		device_xname(sc->sc_dev), __func__));
   10898 
   10899 	/*
   10900 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
   10901 	 * incorrect. So don't print diag output when it's 2nd call.
   10902 	 */
   10903 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10904 		dodiag = false;
   10905 
   10906 	if (mii->mii_readreg == NULL) {
   10907 		/*
   10908 		 *  This is the first call of this function. For ICH and PCH
   10909 		 * variants, it's difficult to determine the PHY access method
   10910 		 * by sc_type, so use the PCI product ID for some devices.
   10911 		 */
   10912 
   10913 		switch (sc->sc_pcidevid) {
   10914 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10915 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10916 			/* 82577 */
   10917 			new_phytype = WMPHY_82577;
   10918 			break;
   10919 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10920 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10921 			/* 82578 */
   10922 			new_phytype = WMPHY_82578;
   10923 			break;
   10924 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10925 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10926 			/* 82579 */
   10927 			new_phytype = WMPHY_82579;
   10928 			break;
   10929 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10930 		case PCI_PRODUCT_INTEL_82801I_BM:
   10931 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10932 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10933 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10934 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10935 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10936 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10937 			/* ICH8, 9, 10 with 82567 */
   10938 			new_phytype = WMPHY_BM;
   10939 			break;
   10940 		default:
   10941 			break;
   10942 		}
   10943 	} else {
   10944 		/* It's not the first call. Use PHY OUI and model */
   10945 		switch (phy_oui) {
   10946 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10947 			switch (phy_model) {
   10948 			case MII_MODEL_ATTANSIC_AR8021:
   10949 				new_phytype = WMPHY_82578;
   10950 				break;
   10951 			default:
   10952 				break;
   10953 			}
   10954 			break;
   10955 		case MII_OUI_xxMARVELL:
   10956 			switch (phy_model) {
   10957 			case MII_MODEL_xxMARVELL_I210:
   10958 				new_phytype = WMPHY_I210;
   10959 				break;
   10960 			case MII_MODEL_xxMARVELL_E1011:
   10961 			case MII_MODEL_xxMARVELL_E1000_3:
   10962 			case MII_MODEL_xxMARVELL_E1000_5:
   10963 			case MII_MODEL_xxMARVELL_E1112:
   10964 				new_phytype = WMPHY_M88;
   10965 				break;
   10966 			case MII_MODEL_xxMARVELL_E1149:
   10967 				new_phytype = WMPHY_BM;
   10968 				break;
   10969 			case MII_MODEL_xxMARVELL_E1111:
   10970 			case MII_MODEL_xxMARVELL_I347:
   10971 			case MII_MODEL_xxMARVELL_E1512:
   10972 			case MII_MODEL_xxMARVELL_E1340M:
   10973 			case MII_MODEL_xxMARVELL_E1543:
   10974 				new_phytype = WMPHY_M88;
   10975 				break;
   10976 			case MII_MODEL_xxMARVELL_I82563:
   10977 				new_phytype = WMPHY_GG82563;
   10978 				break;
   10979 			default:
   10980 				break;
   10981 			}
   10982 			break;
   10983 		case MII_OUI_INTEL:
   10984 			switch (phy_model) {
   10985 			case MII_MODEL_INTEL_I82577:
   10986 				new_phytype = WMPHY_82577;
   10987 				break;
   10988 			case MII_MODEL_INTEL_I82579:
   10989 				new_phytype = WMPHY_82579;
   10990 				break;
   10991 			case MII_MODEL_INTEL_I217:
   10992 				new_phytype = WMPHY_I217;
   10993 				break;
   10994 			case MII_MODEL_INTEL_I82580:
   10995 				new_phytype = WMPHY_82580;
   10996 				break;
   10997 			case MII_MODEL_INTEL_I350:
   10998 				new_phytype = WMPHY_I350;
   10999 				break;
   11000 			default:
   11001 				break;
   11002 			}
   11003 			break;
   11004 		case MII_OUI_yyINTEL:
   11005 			switch (phy_model) {
   11006 			case MII_MODEL_yyINTEL_I82562G:
   11007 			case MII_MODEL_yyINTEL_I82562EM:
   11008 			case MII_MODEL_yyINTEL_I82562ET:
   11009 				new_phytype = WMPHY_IFE;
   11010 				break;
   11011 			case MII_MODEL_yyINTEL_IGP01E1000:
   11012 				new_phytype = WMPHY_IGP;
   11013 				break;
   11014 			case MII_MODEL_yyINTEL_I82566:
   11015 				new_phytype = WMPHY_IGP_3;
   11016 				break;
   11017 			default:
   11018 				break;
   11019 			}
   11020 			break;
   11021 		default:
   11022 			break;
   11023 		}
   11024 
   11025 		if (dodiag) {
   11026 			if (new_phytype == WMPHY_UNKNOWN)
   11027 				aprint_verbose_dev(dev,
   11028 				    "%s: Unknown PHY model. OUI=%06x, "
   11029 				    "model=%04x\n", __func__, phy_oui,
   11030 				    phy_model);
   11031 
   11032 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11033 			    && (sc->sc_phytype != new_phytype)) {
   11034 				aprint_error_dev(dev, "Previously assumed PHY "
   11035 				    "type(%u) was incorrect. PHY type from PHY"
   11036 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11037 			}
   11038 		}
   11039 	}
   11040 
   11041 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11042 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11043 		/* SGMII */
   11044 		new_readreg = wm_sgmii_readreg;
   11045 		new_writereg = wm_sgmii_writereg;
   11046 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11047 		/* BM2 (phyaddr == 1) */
   11048 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11049 		    && (new_phytype != WMPHY_BM)
   11050 		    && (new_phytype != WMPHY_UNKNOWN))
   11051 			doubt_phytype = new_phytype;
   11052 		new_phytype = WMPHY_BM;
   11053 		new_readreg = wm_gmii_bm_readreg;
   11054 		new_writereg = wm_gmii_bm_writereg;
   11055 	} else if (sc->sc_type >= WM_T_PCH) {
   11056 		/* All PCH* use _hv_ */
   11057 		new_readreg = wm_gmii_hv_readreg;
   11058 		new_writereg = wm_gmii_hv_writereg;
   11059 	} else if (sc->sc_type >= WM_T_ICH8) {
   11060 		/* non-82567 ICH8, 9 and 10 */
   11061 		new_readreg = wm_gmii_i82544_readreg;
   11062 		new_writereg = wm_gmii_i82544_writereg;
   11063 	} else if (sc->sc_type >= WM_T_80003) {
   11064 		/* 80003 */
   11065 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11066 		    && (new_phytype != WMPHY_GG82563)
   11067 		    && (new_phytype != WMPHY_UNKNOWN))
   11068 			doubt_phytype = new_phytype;
   11069 		new_phytype = WMPHY_GG82563;
   11070 		new_readreg = wm_gmii_i80003_readreg;
   11071 		new_writereg = wm_gmii_i80003_writereg;
   11072 	} else if (sc->sc_type >= WM_T_I210) {
   11073 		/* I210 and I211 */
   11074 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11075 		    && (new_phytype != WMPHY_I210)
   11076 		    && (new_phytype != WMPHY_UNKNOWN))
   11077 			doubt_phytype = new_phytype;
   11078 		new_phytype = WMPHY_I210;
   11079 		new_readreg = wm_gmii_gs40g_readreg;
   11080 		new_writereg = wm_gmii_gs40g_writereg;
   11081 	} else if (sc->sc_type >= WM_T_82580) {
   11082 		/* 82580, I350 and I354 */
   11083 		new_readreg = wm_gmii_82580_readreg;
   11084 		new_writereg = wm_gmii_82580_writereg;
   11085 	} else if (sc->sc_type >= WM_T_82544) {
   11086 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   11087 		new_readreg = wm_gmii_i82544_readreg;
   11088 		new_writereg = wm_gmii_i82544_writereg;
   11089 	} else {
   11090 		new_readreg = wm_gmii_i82543_readreg;
   11091 		new_writereg = wm_gmii_i82543_writereg;
   11092 	}
   11093 
   11094 	if (new_phytype == WMPHY_BM) {
   11095 		/* All BM use _bm_ */
   11096 		new_readreg = wm_gmii_bm_readreg;
   11097 		new_writereg = wm_gmii_bm_writereg;
   11098 	}
   11099 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11100 		/* All PCH* use _hv_ */
   11101 		new_readreg = wm_gmii_hv_readreg;
   11102 		new_writereg = wm_gmii_hv_writereg;
   11103 	}
   11104 
   11105 	/* Diag output */
   11106 	if (dodiag) {
   11107 		if (doubt_phytype != WMPHY_UNKNOWN)
   11108 			aprint_error_dev(dev, "Assumed new PHY type was "
   11109 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11110 			    new_phytype);
   11111 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11112 		    && (sc->sc_phytype != new_phytype))
   11113 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   11114 			    "was incorrect. New PHY type = %u\n",
   11115 			    sc->sc_phytype, new_phytype);
   11116 
   11117 		if ((mii->mii_readreg != NULL) &&
   11118 		    (new_phytype == WMPHY_UNKNOWN))
   11119 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11120 
   11121 		if ((mii->mii_readreg != NULL) &&
   11122 		    (mii->mii_readreg != new_readreg))
   11123 			aprint_error_dev(dev, "Previously assumed PHY "
   11124 			    "read/write function was incorrect.\n");
   11125 	}
   11126 
   11127 	/* Update now */
   11128 	sc->sc_phytype = new_phytype;
   11129 	mii->mii_readreg = new_readreg;
   11130 	mii->mii_writereg = new_writereg;
   11131 	if (new_readreg == wm_gmii_hv_readreg) {
   11132 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11133 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11134 	} else if (new_readreg == wm_sgmii_readreg) {
   11135 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11136 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11137 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11138 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11139 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11140 	}
   11141 }
   11142 
   11143 /*
   11144  * wm_get_phy_id_82575:
   11145  *
   11146  * Return PHY ID. Return -1 if it failed.
   11147  */
   11148 static int
   11149 wm_get_phy_id_82575(struct wm_softc *sc)
   11150 {
   11151 	uint32_t reg;
   11152 	int phyid = -1;
   11153 
   11154 	/* XXX */
   11155 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11156 		return -1;
   11157 
   11158 	if (wm_sgmii_uses_mdio(sc)) {
   11159 		switch (sc->sc_type) {
   11160 		case WM_T_82575:
   11161 		case WM_T_82576:
   11162 			reg = CSR_READ(sc, WMREG_MDIC);
   11163 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11164 			break;
   11165 		case WM_T_82580:
   11166 		case WM_T_I350:
   11167 		case WM_T_I354:
   11168 		case WM_T_I210:
   11169 		case WM_T_I211:
   11170 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11171 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11172 			break;
   11173 		default:
   11174 			return -1;
   11175 		}
   11176 	}
   11177 
   11178 	return phyid;
   11179 }
   11180 
   11181 /*
   11182  * wm_gmii_mediainit:
   11183  *
   11184  *	Initialize media for use on 1000BASE-T devices.
   11185  */
   11186 static void
   11187 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11188 {
   11189 	device_t dev = sc->sc_dev;
   11190 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11191 	struct mii_data *mii = &sc->sc_mii;
   11192 
   11193 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11194 		device_xname(sc->sc_dev), __func__));
   11195 
   11196 	/* We have GMII. */
   11197 	sc->sc_flags |= WM_F_HAS_MII;
   11198 
   11199 	if (sc->sc_type == WM_T_80003)
   11200 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11201 	else
   11202 		sc->sc_tipg = TIPG_1000T_DFLT;
   11203 
   11204 	/*
   11205 	 * Let the chip set speed/duplex on its own based on
   11206 	 * signals from the PHY.
   11207 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11208 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11209 	 */
   11210 	sc->sc_ctrl |= CTRL_SLU;
   11211 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11212 
   11213 	/* Initialize our media structures and probe the GMII. */
   11214 	mii->mii_ifp = ifp;
   11215 
   11216 	mii->mii_statchg = wm_gmii_statchg;
   11217 
   11218 	/* get PHY control from SMBus to PCIe */
   11219 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11220 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11221 	    || (sc->sc_type == WM_T_PCH_CNP))
   11222 		wm_init_phy_workarounds_pchlan(sc);
   11223 
   11224 	wm_gmii_reset(sc);
   11225 
   11226 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11227 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11228 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11229 
   11230 	/* Setup internal SGMII PHY for SFP */
   11231 	wm_sgmii_sfp_preconfig(sc);
   11232 
   11233 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11234 	    || (sc->sc_type == WM_T_82580)
   11235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11236 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11237 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11238 			/* Attach only one port */
   11239 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11240 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11241 		} else {
   11242 			int i, id;
   11243 			uint32_t ctrl_ext;
   11244 
   11245 			id = wm_get_phy_id_82575(sc);
   11246 			if (id != -1) {
   11247 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11248 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11249 			}
   11250 			if ((id == -1)
   11251 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11252 				/* Power on sgmii phy if it is disabled */
   11253 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11254 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11255 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11256 				CSR_WRITE_FLUSH(sc);
   11257 				delay(300*1000); /* XXX too long */
   11258 
   11259 				/*
   11260 				 * From 1 to 8.
   11261 				 *
   11262 				 * I2C access fails with I2C register's ERROR
   11263 				 * bit set, so prevent error message while
   11264 				 * scanning.
   11265 				 */
   11266 				sc->phy.no_errprint = true;
   11267 				for (i = 1; i < 8; i++)
   11268 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11269 					    0xffffffff, i, MII_OFFSET_ANY,
   11270 					    MIIF_DOPAUSE);
   11271 				sc->phy.no_errprint = false;
   11272 
   11273 				/* Restore previous sfp cage power state */
   11274 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11275 			}
   11276 		}
   11277 	} else
   11278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11280 
   11281 	/*
   11282 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11283 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11284 	 */
   11285 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11286 		|| (sc->sc_type == WM_T_PCH_SPT)
   11287 		|| (sc->sc_type == WM_T_PCH_CNP))
   11288 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11289 		wm_set_mdio_slow_mode_hv(sc);
   11290 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11291 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11292 	}
   11293 
   11294 	/*
   11295 	 * (For ICH8 variants)
   11296 	 * If PHY detection failed, use BM's r/w function and retry.
   11297 	 */
   11298 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11299 		/* if failed, retry with *_bm_* */
   11300 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11301 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11302 		    sc->sc_phytype);
   11303 		sc->sc_phytype = WMPHY_BM;
   11304 		mii->mii_readreg = wm_gmii_bm_readreg;
   11305 		mii->mii_writereg = wm_gmii_bm_writereg;
   11306 
   11307 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11308 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11309 	}
   11310 
   11311 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11312 		/* Any PHY wasn't found */
   11313 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11314 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11315 		sc->sc_phytype = WMPHY_NONE;
   11316 	} else {
   11317 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11318 
   11319 		/*
   11320 		 * PHY found! Check PHY type again by the second call of
   11321 		 * wm_gmii_setup_phytype.
   11322 		 */
   11323 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11324 		    child->mii_mpd_model);
   11325 
   11326 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11327 	}
   11328 }
   11329 
   11330 /*
   11331  * wm_gmii_mediachange:	[ifmedia interface function]
   11332  *
   11333  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11334  */
   11335 static int
   11336 wm_gmii_mediachange(struct ifnet *ifp)
   11337 {
   11338 	struct wm_softc *sc = ifp->if_softc;
   11339 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11340 	uint32_t reg;
   11341 	int rc;
   11342 
   11343 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11344 		device_xname(sc->sc_dev), __func__));
   11345 
   11346 	KASSERT(mutex_owned(sc->sc_core_lock));
   11347 
   11348 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11349 		return 0;
   11350 
   11351 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11352 	if ((sc->sc_type == WM_T_82580)
   11353 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11354 	    || (sc->sc_type == WM_T_I211)) {
   11355 		reg = CSR_READ(sc, WMREG_PHPM);
   11356 		reg &= ~PHPM_GO_LINK_D;
   11357 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11358 	}
   11359 
   11360 	/* Disable D0 LPLU. */
   11361 	wm_lplu_d0_disable(sc);
   11362 
   11363 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11364 	sc->sc_ctrl |= CTRL_SLU;
   11365 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11366 	    || (sc->sc_type > WM_T_82543)) {
   11367 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11368 	} else {
   11369 		sc->sc_ctrl &= ~CTRL_ASDE;
   11370 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11371 		if (ife->ifm_media & IFM_FDX)
   11372 			sc->sc_ctrl |= CTRL_FD;
   11373 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11374 		case IFM_10_T:
   11375 			sc->sc_ctrl |= CTRL_SPEED_10;
   11376 			break;
   11377 		case IFM_100_TX:
   11378 			sc->sc_ctrl |= CTRL_SPEED_100;
   11379 			break;
   11380 		case IFM_1000_T:
   11381 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11382 			break;
   11383 		case IFM_NONE:
   11384 			/* There is no specific setting for IFM_NONE */
   11385 			break;
   11386 		default:
   11387 			panic("wm_gmii_mediachange: bad media 0x%x",
   11388 			    ife->ifm_media);
   11389 		}
   11390 	}
   11391 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11392 	CSR_WRITE_FLUSH(sc);
   11393 
   11394 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11395 		wm_serdes_mediachange(ifp);
   11396 
   11397 	if (sc->sc_type <= WM_T_82543)
   11398 		wm_gmii_reset(sc);
   11399 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11400 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   11401 		/* allow time for SFP cage time to power up phy */
   11402 		delay(300 * 1000);
   11403 		wm_gmii_reset(sc);
   11404 	}
   11405 
   11406 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11407 		return 0;
   11408 	return rc;
   11409 }
   11410 
   11411 /*
   11412  * wm_gmii_mediastatus:	[ifmedia interface function]
   11413  *
   11414  *	Get the current interface media status on a 1000BASE-T device.
   11415  */
   11416 static void
   11417 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11418 {
   11419 	struct wm_softc *sc = ifp->if_softc;
   11420 
   11421 	KASSERT(mutex_owned(sc->sc_core_lock));
   11422 
   11423 	ether_mediastatus(ifp, ifmr);
   11424 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11425 	    | sc->sc_flowflags;
   11426 }
   11427 
   11428 #define	MDI_IO		CTRL_SWDPIN(2)
   11429 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11430 #define	MDI_CLK		CTRL_SWDPIN(3)
   11431 
   11432 static void
   11433 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11434 {
   11435 	uint32_t i, v;
   11436 
   11437 	v = CSR_READ(sc, WMREG_CTRL);
   11438 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11439 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11440 
   11441 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11442 		if (data & i)
   11443 			v |= MDI_IO;
   11444 		else
   11445 			v &= ~MDI_IO;
   11446 		CSR_WRITE(sc, WMREG_CTRL, v);
   11447 		CSR_WRITE_FLUSH(sc);
   11448 		delay(10);
   11449 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11450 		CSR_WRITE_FLUSH(sc);
   11451 		delay(10);
   11452 		CSR_WRITE(sc, WMREG_CTRL, v);
   11453 		CSR_WRITE_FLUSH(sc);
   11454 		delay(10);
   11455 	}
   11456 }
   11457 
   11458 static uint16_t
   11459 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11460 {
   11461 	uint32_t v, i;
   11462 	uint16_t data = 0;
   11463 
   11464 	v = CSR_READ(sc, WMREG_CTRL);
   11465 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11466 	v |= CTRL_SWDPIO(3);
   11467 
   11468 	CSR_WRITE(sc, WMREG_CTRL, v);
   11469 	CSR_WRITE_FLUSH(sc);
   11470 	delay(10);
   11471 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11472 	CSR_WRITE_FLUSH(sc);
   11473 	delay(10);
   11474 	CSR_WRITE(sc, WMREG_CTRL, v);
   11475 	CSR_WRITE_FLUSH(sc);
   11476 	delay(10);
   11477 
   11478 	for (i = 0; i < 16; i++) {
   11479 		data <<= 1;
   11480 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11481 		CSR_WRITE_FLUSH(sc);
   11482 		delay(10);
   11483 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11484 			data |= 1;
   11485 		CSR_WRITE(sc, WMREG_CTRL, v);
   11486 		CSR_WRITE_FLUSH(sc);
   11487 		delay(10);
   11488 	}
   11489 
   11490 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11491 	CSR_WRITE_FLUSH(sc);
   11492 	delay(10);
   11493 	CSR_WRITE(sc, WMREG_CTRL, v);
   11494 	CSR_WRITE_FLUSH(sc);
   11495 	delay(10);
   11496 
   11497 	return data;
   11498 }
   11499 
   11500 #undef MDI_IO
   11501 #undef MDI_DIR
   11502 #undef MDI_CLK
   11503 
   11504 /*
   11505  * wm_gmii_i82543_readreg:	[mii interface function]
   11506  *
   11507  *	Read a PHY register on the GMII (i82543 version).
   11508  */
   11509 static int
   11510 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11511 {
   11512 	struct wm_softc *sc = device_private(dev);
   11513 
   11514 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11515 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11516 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11517 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11518 
   11519 	DPRINTF(sc, WM_DEBUG_GMII,
   11520 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11521 		device_xname(dev), phy, reg, *val));
   11522 
   11523 	return 0;
   11524 }
   11525 
   11526 /*
   11527  * wm_gmii_i82543_writereg:	[mii interface function]
   11528  *
   11529  *	Write a PHY register on the GMII (i82543 version).
   11530  */
   11531 static int
   11532 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11533 {
   11534 	struct wm_softc *sc = device_private(dev);
   11535 
   11536 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11537 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11538 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11539 	    (MII_COMMAND_START << 30), 32);
   11540 
   11541 	return 0;
   11542 }
   11543 
   11544 /*
   11545  * wm_gmii_mdic_readreg:	[mii interface function]
   11546  *
   11547  *	Read a PHY register on the GMII.
   11548  */
   11549 static int
   11550 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11551 {
   11552 	struct wm_softc *sc = device_private(dev);
   11553 	uint32_t mdic = 0;
   11554 	int i;
   11555 
   11556 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11557 	    && (reg > MII_ADDRMASK)) {
   11558 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11559 		    __func__, sc->sc_phytype, reg);
   11560 		reg &= MII_ADDRMASK;
   11561 	}
   11562 
   11563 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11564 	    MDIC_REGADD(reg));
   11565 
   11566 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11567 		delay(50);
   11568 		mdic = CSR_READ(sc, WMREG_MDIC);
   11569 		if (mdic & MDIC_READY)
   11570 			break;
   11571 	}
   11572 
   11573 	if ((mdic & MDIC_READY) == 0) {
   11574 		DPRINTF(sc, WM_DEBUG_GMII,
   11575 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11576 			device_xname(dev), phy, reg));
   11577 		return ETIMEDOUT;
   11578 	} else if (mdic & MDIC_E) {
   11579 		/* This is normal if no PHY is present. */
   11580 		DPRINTF(sc, WM_DEBUG_GMII,
   11581 		    ("%s: MDIC read error: phy %d reg %d\n",
   11582 			device_xname(sc->sc_dev), phy, reg));
   11583 		return -1;
   11584 	} else
   11585 		*val = MDIC_DATA(mdic);
   11586 
   11587 	/*
   11588 	 * Allow some time after each MDIC transaction to avoid
   11589 	 * reading duplicate data in the next MDIC transaction.
   11590 	 */
   11591 	if (sc->sc_type == WM_T_PCH2)
   11592 		delay(100);
   11593 
   11594 	return 0;
   11595 }
   11596 
   11597 /*
   11598  * wm_gmii_mdic_writereg:	[mii interface function]
   11599  *
   11600  *	Write a PHY register on the GMII.
   11601  */
   11602 static int
   11603 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11604 {
   11605 	struct wm_softc *sc = device_private(dev);
   11606 	uint32_t mdic = 0;
   11607 	int i;
   11608 
   11609 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11610 	    && (reg > MII_ADDRMASK)) {
   11611 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11612 		    __func__, sc->sc_phytype, reg);
   11613 		reg &= MII_ADDRMASK;
   11614 	}
   11615 
   11616 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11617 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11618 
   11619 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11620 		delay(50);
   11621 		mdic = CSR_READ(sc, WMREG_MDIC);
   11622 		if (mdic & MDIC_READY)
   11623 			break;
   11624 	}
   11625 
   11626 	if ((mdic & MDIC_READY) == 0) {
   11627 		DPRINTF(sc, WM_DEBUG_GMII,
   11628 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11629 			device_xname(dev), phy, reg));
   11630 		return ETIMEDOUT;
   11631 	} else if (mdic & MDIC_E) {
   11632 		DPRINTF(sc, WM_DEBUG_GMII,
   11633 		    ("%s: MDIC write error: phy %d reg %d\n",
   11634 			device_xname(dev), phy, reg));
   11635 		return -1;
   11636 	}
   11637 
   11638 	/*
   11639 	 * Allow some time after each MDIC transaction to avoid
   11640 	 * reading duplicate data in the next MDIC transaction.
   11641 	 */
   11642 	if (sc->sc_type == WM_T_PCH2)
   11643 		delay(100);
   11644 
   11645 	return 0;
   11646 }
   11647 
   11648 /*
   11649  * wm_gmii_i82544_readreg:	[mii interface function]
   11650  *
   11651  *	Read a PHY register on the GMII.
   11652  */
   11653 static int
   11654 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11655 {
   11656 	struct wm_softc *sc = device_private(dev);
   11657 	int rv;
   11658 
   11659 	rv = sc->phy.acquire(sc);
   11660 	if (rv != 0) {
   11661 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11662 		return rv;
   11663 	}
   11664 
   11665 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11666 
   11667 	sc->phy.release(sc);
   11668 
   11669 	return rv;
   11670 }
   11671 
   11672 static int
   11673 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11674 {
   11675 	struct wm_softc *sc = device_private(dev);
   11676 	int rv;
   11677 
   11678 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11679 		switch (sc->sc_phytype) {
   11680 		case WMPHY_IGP:
   11681 		case WMPHY_IGP_2:
   11682 		case WMPHY_IGP_3:
   11683 			rv = wm_gmii_mdic_writereg(dev, phy,
   11684 			    IGPHY_PAGE_SELECT, reg);
   11685 			if (rv != 0)
   11686 				return rv;
   11687 			break;
   11688 		default:
   11689 #ifdef WM_DEBUG
   11690 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11691 			    __func__, sc->sc_phytype, reg);
   11692 #endif
   11693 			break;
   11694 		}
   11695 	}
   11696 
   11697 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11698 }
   11699 
   11700 /*
   11701  * wm_gmii_i82544_writereg:	[mii interface function]
   11702  *
   11703  *	Write a PHY register on the GMII.
   11704  */
   11705 static int
   11706 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11707 {
   11708 	struct wm_softc *sc = device_private(dev);
   11709 	int rv;
   11710 
   11711 	rv = sc->phy.acquire(sc);
   11712 	if (rv != 0) {
   11713 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11714 		return rv;
   11715 	}
   11716 
   11717 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11718 	sc->phy.release(sc);
   11719 
   11720 	return rv;
   11721 }
   11722 
   11723 static int
   11724 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11725 {
   11726 	struct wm_softc *sc = device_private(dev);
   11727 	int rv;
   11728 
   11729 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11730 		switch (sc->sc_phytype) {
   11731 		case WMPHY_IGP:
   11732 		case WMPHY_IGP_2:
   11733 		case WMPHY_IGP_3:
   11734 			rv = wm_gmii_mdic_writereg(dev, phy,
   11735 			    IGPHY_PAGE_SELECT, reg);
   11736 			if (rv != 0)
   11737 				return rv;
   11738 			break;
   11739 		default:
   11740 #ifdef WM_DEBUG
   11741 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11742 			    __func__, sc->sc_phytype, reg);
   11743 #endif
   11744 			break;
   11745 		}
   11746 	}
   11747 
   11748 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11749 }
   11750 
   11751 /*
   11752  * wm_gmii_i80003_readreg:	[mii interface function]
   11753  *
   11754  *	Read a PHY register on the kumeran
   11755  * This could be handled by the PHY layer if we didn't have to lock the
   11756  * resource ...
   11757  */
   11758 static int
   11759 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11760 {
   11761 	struct wm_softc *sc = device_private(dev);
   11762 	int page_select;
   11763 	uint16_t temp, temp2;
   11764 	int rv;
   11765 
   11766 	if (phy != 1) /* Only one PHY on kumeran bus */
   11767 		return -1;
   11768 
   11769 	rv = sc->phy.acquire(sc);
   11770 	if (rv != 0) {
   11771 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11772 		return rv;
   11773 	}
   11774 
   11775 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11776 		page_select = GG82563_PHY_PAGE_SELECT;
   11777 	else {
   11778 		/*
   11779 		 * Use Alternative Page Select register to access registers
   11780 		 * 30 and 31.
   11781 		 */
   11782 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11783 	}
   11784 	temp = reg >> GG82563_PAGE_SHIFT;
   11785 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11786 		goto out;
   11787 
   11788 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11789 		/*
   11790 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11791 		 * register.
   11792 		 */
   11793 		delay(200);
   11794 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11795 		if ((rv != 0) || (temp2 != temp)) {
   11796 			device_printf(dev, "%s failed\n", __func__);
   11797 			rv = -1;
   11798 			goto out;
   11799 		}
   11800 		delay(200);
   11801 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11802 		delay(200);
   11803 	} else
   11804 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11805 
   11806 out:
   11807 	sc->phy.release(sc);
   11808 	return rv;
   11809 }
   11810 
   11811 /*
   11812  * wm_gmii_i80003_writereg:	[mii interface function]
   11813  *
   11814  *	Write a PHY register on the kumeran.
   11815  * This could be handled by the PHY layer if we didn't have to lock the
   11816  * resource ...
   11817  */
   11818 static int
   11819 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11820 {
   11821 	struct wm_softc *sc = device_private(dev);
   11822 	int page_select, rv;
   11823 	uint16_t temp, temp2;
   11824 
   11825 	if (phy != 1) /* Only one PHY on kumeran bus */
   11826 		return -1;
   11827 
   11828 	rv = sc->phy.acquire(sc);
   11829 	if (rv != 0) {
   11830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11831 		return rv;
   11832 	}
   11833 
   11834 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11835 		page_select = GG82563_PHY_PAGE_SELECT;
   11836 	else {
   11837 		/*
   11838 		 * Use Alternative Page Select register to access registers
   11839 		 * 30 and 31.
   11840 		 */
   11841 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11842 	}
   11843 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11844 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11845 		goto out;
   11846 
   11847 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11848 		/*
   11849 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11850 		 * register.
   11851 		 */
   11852 		delay(200);
   11853 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11854 		if ((rv != 0) || (temp2 != temp)) {
   11855 			device_printf(dev, "%s failed\n", __func__);
   11856 			rv = -1;
   11857 			goto out;
   11858 		}
   11859 		delay(200);
   11860 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11861 		delay(200);
   11862 	} else
   11863 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11864 
   11865 out:
   11866 	sc->phy.release(sc);
   11867 	return rv;
   11868 }
   11869 
   11870 /*
   11871  * wm_gmii_bm_readreg:	[mii interface function]
   11872  *
   11873  *	Read a PHY register on the kumeran
   11874  * This could be handled by the PHY layer if we didn't have to lock the
   11875  * resource ...
   11876  */
   11877 static int
   11878 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11879 {
   11880 	struct wm_softc *sc = device_private(dev);
   11881 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11882 	int rv;
   11883 
   11884 	rv = sc->phy.acquire(sc);
   11885 	if (rv != 0) {
   11886 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11887 		return rv;
   11888 	}
   11889 
   11890 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11891 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11892 		    || (reg == 31)) ? 1 : phy;
   11893 	/* Page 800 works differently than the rest so it has its own func */
   11894 	if (page == BM_WUC_PAGE) {
   11895 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11896 		goto release;
   11897 	}
   11898 
   11899 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11900 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11901 		    && (sc->sc_type != WM_T_82583))
   11902 			rv = wm_gmii_mdic_writereg(dev, phy,
   11903 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11904 		else
   11905 			rv = wm_gmii_mdic_writereg(dev, phy,
   11906 			    BME1000_PHY_PAGE_SELECT, page);
   11907 		if (rv != 0)
   11908 			goto release;
   11909 	}
   11910 
   11911 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11912 
   11913 release:
   11914 	sc->phy.release(sc);
   11915 	return rv;
   11916 }
   11917 
   11918 /*
   11919  * wm_gmii_bm_writereg:	[mii interface function]
   11920  *
   11921  *	Write a PHY register on the kumeran.
   11922  * This could be handled by the PHY layer if we didn't have to lock the
   11923  * resource ...
   11924  */
   11925 static int
   11926 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11927 {
   11928 	struct wm_softc *sc = device_private(dev);
   11929 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11930 	int rv;
   11931 
   11932 	rv = sc->phy.acquire(sc);
   11933 	if (rv != 0) {
   11934 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11935 		return rv;
   11936 	}
   11937 
   11938 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11939 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11940 		    || (reg == 31)) ? 1 : phy;
   11941 	/* Page 800 works differently than the rest so it has its own func */
   11942 	if (page == BM_WUC_PAGE) {
   11943 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11944 		goto release;
   11945 	}
   11946 
   11947 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11948 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11949 		    && (sc->sc_type != WM_T_82583))
   11950 			rv = wm_gmii_mdic_writereg(dev, phy,
   11951 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11952 		else
   11953 			rv = wm_gmii_mdic_writereg(dev, phy,
   11954 			    BME1000_PHY_PAGE_SELECT, page);
   11955 		if (rv != 0)
   11956 			goto release;
   11957 	}
   11958 
   11959 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11960 
   11961 release:
   11962 	sc->phy.release(sc);
   11963 	return rv;
   11964 }
   11965 
   11966 /*
   11967  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11968  *  @dev: pointer to the HW structure
   11969  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11970  *
   11971  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11972  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11973  */
   11974 static int
   11975 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11976 {
   11977 #ifdef WM_DEBUG
   11978 	struct wm_softc *sc = device_private(dev);
   11979 #endif
   11980 	uint16_t temp;
   11981 	int rv;
   11982 
   11983 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11984 		device_xname(dev), __func__));
   11985 
   11986 	if (!phy_regp)
   11987 		return -1;
   11988 
   11989 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11990 
   11991 	/* Select Port Control Registers page */
   11992 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11993 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11994 	if (rv != 0)
   11995 		return rv;
   11996 
   11997 	/* Read WUCE and save it */
   11998 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11999 	if (rv != 0)
   12000 		return rv;
   12001 
   12002 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12003 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12004 	 */
   12005 	temp = *phy_regp;
   12006 	temp |= BM_WUC_ENABLE_BIT;
   12007 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12008 
   12009 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12010 		return rv;
   12011 
   12012 	/* Select Host Wakeup Registers page - caller now able to write
   12013 	 * registers on the Wakeup registers page
   12014 	 */
   12015 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12016 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12017 }
   12018 
   12019 /*
   12020  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12021  *  @dev: pointer to the HW structure
   12022  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12023  *
   12024  *  Restore BM_WUC_ENABLE_REG to its original value.
   12025  *
   12026  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12027  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12028  *  caller.
   12029  */
   12030 static int
   12031 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12032 {
   12033 #ifdef WM_DEBUG
   12034 	struct wm_softc *sc = device_private(dev);
   12035 #endif
   12036 
   12037 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12038 		device_xname(dev), __func__));
   12039 
   12040 	if (!phy_regp)
   12041 		return -1;
   12042 
   12043 	/* Select Port Control Registers page */
   12044 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12045 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12046 
   12047 	/* Restore 769.17 to its original value */
   12048 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12049 
   12050 	return 0;
   12051 }
   12052 
   12053 /*
   12054  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12055  *  @sc: pointer to the HW structure
   12056  *  @offset: register offset to be read or written
   12057  *  @val: pointer to the data to read or write
   12058  *  @rd: determines if operation is read or write
   12059  *  @page_set: BM_WUC_PAGE already set and access enabled
   12060  *
   12061  *  Read the PHY register at offset and store the retrieved information in
   12062  *  data, or write data to PHY register at offset.  Note the procedure to
   12063  *  access the PHY wakeup registers is different than reading the other PHY
   12064  *  registers. It works as such:
   12065  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   12066  *  2) Set page to 800 for host (801 if we were manageability)
   12067  *  3) Write the address using the address opcode (0x11)
   12068  *  4) Read or write the data using the data opcode (0x12)
   12069  *  5) Restore 769.17.2 to its original value
   12070  *
   12071  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12072  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12073  *
   12074  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12075  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12076  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12077  */
   12078 static int
   12079 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   12080     bool page_set)
   12081 {
   12082 	struct wm_softc *sc = device_private(dev);
   12083 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12084 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12085 	uint16_t wuce;
   12086 	int rv = 0;
   12087 
   12088 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12089 		device_xname(dev), __func__));
   12090 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12091 	if ((sc->sc_type == WM_T_PCH)
   12092 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12093 		device_printf(dev,
   12094 		    "Attempting to access page %d while gig enabled.\n", page);
   12095 	}
   12096 
   12097 	if (!page_set) {
   12098 		/* Enable access to PHY wakeup registers */
   12099 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12100 		if (rv != 0) {
   12101 			device_printf(dev,
   12102 			    "%s: Could not enable PHY wakeup reg access\n",
   12103 			    __func__);
   12104 			return rv;
   12105 		}
   12106 	}
   12107 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12108 		device_xname(sc->sc_dev), __func__, page, regnum));
   12109 
   12110 	/*
   12111 	 * 2) Access PHY wakeup register.
   12112 	 * See wm_access_phy_wakeup_reg_bm.
   12113 	 */
   12114 
   12115 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12116 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12117 	if (rv != 0)
   12118 		return rv;
   12119 
   12120 	if (rd) {
   12121 		/* Read the Wakeup register page value using opcode 0x12 */
   12122 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12123 	} else {
   12124 		/* Write the Wakeup register page value using opcode 0x12 */
   12125 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12126 	}
   12127 	if (rv != 0)
   12128 		return rv;
   12129 
   12130 	if (!page_set)
   12131 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12132 
   12133 	return rv;
   12134 }
   12135 
   12136 /*
   12137  * wm_gmii_hv_readreg:	[mii interface function]
   12138  *
   12139  *	Read a PHY register on the kumeran
   12140  * This could be handled by the PHY layer if we didn't have to lock the
   12141  * resource ...
   12142  */
   12143 static int
   12144 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12145 {
   12146 	struct wm_softc *sc = device_private(dev);
   12147 	int rv;
   12148 
   12149 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12150 		device_xname(dev), __func__));
   12151 
   12152 	rv = sc->phy.acquire(sc);
   12153 	if (rv != 0) {
   12154 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12155 		return rv;
   12156 	}
   12157 
   12158 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12159 	sc->phy.release(sc);
   12160 	return rv;
   12161 }
   12162 
   12163 static int
   12164 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12165 {
   12166 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12167 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12168 	int rv;
   12169 
   12170 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12171 
   12172 	/* Page 800 works differently than the rest so it has its own func */
   12173 	if (page == BM_WUC_PAGE)
   12174 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12175 
   12176 	/*
   12177 	 * Lower than page 768 works differently than the rest so it has its
   12178 	 * own func
   12179 	 */
   12180 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12181 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12182 		return -1;
   12183 	}
   12184 
   12185 	/*
   12186 	 * XXX I21[789] documents say that the SMBus Address register is at
   12187 	 * PHY address 01, Page 0 (not 768), Register 26.
   12188 	 */
   12189 	if (page == HV_INTC_FC_PAGE_START)
   12190 		page = 0;
   12191 
   12192 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12193 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12194 		    page << BME1000_PAGE_SHIFT);
   12195 		if (rv != 0)
   12196 			return rv;
   12197 	}
   12198 
   12199 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12200 }
   12201 
   12202 /*
   12203  * wm_gmii_hv_writereg:	[mii interface function]
   12204  *
   12205  *	Write a PHY register on the kumeran.
   12206  * This could be handled by the PHY layer if we didn't have to lock the
   12207  * resource ...
   12208  */
   12209 static int
   12210 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12211 {
   12212 	struct wm_softc *sc = device_private(dev);
   12213 	int rv;
   12214 
   12215 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12216 		device_xname(dev), __func__));
   12217 
   12218 	rv = sc->phy.acquire(sc);
   12219 	if (rv != 0) {
   12220 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12221 		return rv;
   12222 	}
   12223 
   12224 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12225 	sc->phy.release(sc);
   12226 
   12227 	return rv;
   12228 }
   12229 
   12230 static int
   12231 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12232 {
   12233 	struct wm_softc *sc = device_private(dev);
   12234 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12235 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12236 	int rv;
   12237 
   12238 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12239 
   12240 	/* Page 800 works differently than the rest so it has its own func */
   12241 	if (page == BM_WUC_PAGE)
   12242 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12243 		    false);
   12244 
   12245 	/*
   12246 	 * Lower than page 768 works differently than the rest so it has its
   12247 	 * own func
   12248 	 */
   12249 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12250 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12251 		return -1;
   12252 	}
   12253 
   12254 	{
   12255 		/*
   12256 		 * XXX I21[789] documents say that the SMBus Address register
   12257 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12258 		 */
   12259 		if (page == HV_INTC_FC_PAGE_START)
   12260 			page = 0;
   12261 
   12262 		/*
   12263 		 * XXX Workaround MDIO accesses being disabled after entering
   12264 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12265 		 * register is set)
   12266 		 */
   12267 		if (sc->sc_phytype == WMPHY_82578) {
   12268 			struct mii_softc *child;
   12269 
   12270 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12271 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12272 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12273 			    && ((val & (1 << 11)) != 0)) {
   12274 				device_printf(dev, "XXX need workaround\n");
   12275 			}
   12276 		}
   12277 
   12278 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12279 			rv = wm_gmii_mdic_writereg(dev, 1,
   12280 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12281 			if (rv != 0)
   12282 				return rv;
   12283 		}
   12284 	}
   12285 
   12286 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12287 }
   12288 
   12289 /*
   12290  * wm_gmii_82580_readreg:	[mii interface function]
   12291  *
   12292  *	Read a PHY register on the 82580 and I350.
   12293  * This could be handled by the PHY layer if we didn't have to lock the
   12294  * resource ...
   12295  */
   12296 static int
   12297 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12298 {
   12299 	struct wm_softc *sc = device_private(dev);
   12300 	int rv;
   12301 
   12302 	rv = sc->phy.acquire(sc);
   12303 	if (rv != 0) {
   12304 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12305 		return rv;
   12306 	}
   12307 
   12308 #ifdef DIAGNOSTIC
   12309 	if (reg > MII_ADDRMASK) {
   12310 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12311 		    __func__, sc->sc_phytype, reg);
   12312 		reg &= MII_ADDRMASK;
   12313 	}
   12314 #endif
   12315 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12316 
   12317 	sc->phy.release(sc);
   12318 	return rv;
   12319 }
   12320 
   12321 /*
   12322  * wm_gmii_82580_writereg:	[mii interface function]
   12323  *
   12324  *	Write a PHY register on the 82580 and I350.
   12325  * This could be handled by the PHY layer if we didn't have to lock the
   12326  * resource ...
   12327  */
   12328 static int
   12329 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12330 {
   12331 	struct wm_softc *sc = device_private(dev);
   12332 	int rv;
   12333 
   12334 	rv = sc->phy.acquire(sc);
   12335 	if (rv != 0) {
   12336 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12337 		return rv;
   12338 	}
   12339 
   12340 #ifdef DIAGNOSTIC
   12341 	if (reg > MII_ADDRMASK) {
   12342 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12343 		    __func__, sc->sc_phytype, reg);
   12344 		reg &= MII_ADDRMASK;
   12345 	}
   12346 #endif
   12347 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12348 
   12349 	sc->phy.release(sc);
   12350 	return rv;
   12351 }
   12352 
   12353 /*
   12354  * wm_gmii_gs40g_readreg:	[mii interface function]
   12355  *
   12356  *	Read a PHY register on the I2100 and I211.
   12357  * This could be handled by the PHY layer if we didn't have to lock the
   12358  * resource ...
   12359  */
   12360 static int
   12361 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12362 {
   12363 	struct wm_softc *sc = device_private(dev);
   12364 	int page, offset;
   12365 	int rv;
   12366 
   12367 	/* Acquire semaphore */
   12368 	rv = sc->phy.acquire(sc);
   12369 	if (rv != 0) {
   12370 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12371 		return rv;
   12372 	}
   12373 
   12374 	/* Page select */
   12375 	page = reg >> GS40G_PAGE_SHIFT;
   12376 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12377 	if (rv != 0)
   12378 		goto release;
   12379 
   12380 	/* Read reg */
   12381 	offset = reg & GS40G_OFFSET_MASK;
   12382 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12383 
   12384 release:
   12385 	sc->phy.release(sc);
   12386 	return rv;
   12387 }
   12388 
   12389 /*
   12390  * wm_gmii_gs40g_writereg:	[mii interface function]
   12391  *
   12392  *	Write a PHY register on the I210 and I211.
   12393  * This could be handled by the PHY layer if we didn't have to lock the
   12394  * resource ...
   12395  */
   12396 static int
   12397 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12398 {
   12399 	struct wm_softc *sc = device_private(dev);
   12400 	uint16_t page;
   12401 	int offset, rv;
   12402 
   12403 	/* Acquire semaphore */
   12404 	rv = sc->phy.acquire(sc);
   12405 	if (rv != 0) {
   12406 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12407 		return rv;
   12408 	}
   12409 
   12410 	/* Page select */
   12411 	page = reg >> GS40G_PAGE_SHIFT;
   12412 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12413 	if (rv != 0)
   12414 		goto release;
   12415 
   12416 	/* Write reg */
   12417 	offset = reg & GS40G_OFFSET_MASK;
   12418 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12419 
   12420 release:
   12421 	/* Release semaphore */
   12422 	sc->phy.release(sc);
   12423 	return rv;
   12424 }
   12425 
   12426 /*
   12427  * wm_gmii_statchg:	[mii interface function]
   12428  *
   12429  *	Callback from MII layer when media changes.
   12430  */
   12431 static void
   12432 wm_gmii_statchg(struct ifnet *ifp)
   12433 {
   12434 	struct wm_softc *sc = ifp->if_softc;
   12435 	struct mii_data *mii = &sc->sc_mii;
   12436 
   12437 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12438 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12439 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12440 
   12441 	/* Get flow control negotiation result. */
   12442 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12443 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12444 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12445 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12446 	}
   12447 
   12448 	if (sc->sc_flowflags & IFM_FLOW) {
   12449 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12450 			sc->sc_ctrl |= CTRL_TFCE;
   12451 			sc->sc_fcrtl |= FCRTL_XONE;
   12452 		}
   12453 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12454 			sc->sc_ctrl |= CTRL_RFCE;
   12455 	}
   12456 
   12457 	if (mii->mii_media_active & IFM_FDX) {
   12458 		DPRINTF(sc, WM_DEBUG_LINK,
   12459 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12460 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12461 	} else {
   12462 		DPRINTF(sc, WM_DEBUG_LINK,
   12463 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12464 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12465 	}
   12466 
   12467 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12468 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12469 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12470 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12471 	if (sc->sc_type == WM_T_80003) {
   12472 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12473 		case IFM_1000_T:
   12474 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12475 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12476 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12477 			break;
   12478 		default:
   12479 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12480 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12481 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12482 			break;
   12483 		}
   12484 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12485 	}
   12486 }
   12487 
   12488 /* kumeran related (80003, ICH* and PCH*) */
   12489 
   12490 /*
   12491  * wm_kmrn_readreg:
   12492  *
   12493  *	Read a kumeran register
   12494  */
   12495 static int
   12496 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12497 {
   12498 	int rv;
   12499 
   12500 	if (sc->sc_type == WM_T_80003)
   12501 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12502 	else
   12503 		rv = sc->phy.acquire(sc);
   12504 	if (rv != 0) {
   12505 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12506 		    __func__);
   12507 		return rv;
   12508 	}
   12509 
   12510 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12511 
   12512 	if (sc->sc_type == WM_T_80003)
   12513 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12514 	else
   12515 		sc->phy.release(sc);
   12516 
   12517 	return rv;
   12518 }
   12519 
   12520 static int
   12521 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12522 {
   12523 
   12524 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12525 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12526 	    KUMCTRLSTA_REN);
   12527 	CSR_WRITE_FLUSH(sc);
   12528 	delay(2);
   12529 
   12530 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12531 
   12532 	return 0;
   12533 }
   12534 
   12535 /*
   12536  * wm_kmrn_writereg:
   12537  *
   12538  *	Write a kumeran register
   12539  */
   12540 static int
   12541 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12542 {
   12543 	int rv;
   12544 
   12545 	if (sc->sc_type == WM_T_80003)
   12546 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12547 	else
   12548 		rv = sc->phy.acquire(sc);
   12549 	if (rv != 0) {
   12550 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12551 		    __func__);
   12552 		return rv;
   12553 	}
   12554 
   12555 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12556 
   12557 	if (sc->sc_type == WM_T_80003)
   12558 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12559 	else
   12560 		sc->phy.release(sc);
   12561 
   12562 	return rv;
   12563 }
   12564 
   12565 static int
   12566 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12567 {
   12568 
   12569 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12570 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12571 
   12572 	return 0;
   12573 }
   12574 
   12575 /*
   12576  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12577  * This access method is different from IEEE MMD.
   12578  */
   12579 static int
   12580 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12581 {
   12582 	struct wm_softc *sc = device_private(dev);
   12583 	int rv;
   12584 
   12585 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12586 	if (rv != 0)
   12587 		return rv;
   12588 
   12589 	if (rd)
   12590 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12591 	else
   12592 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12593 	return rv;
   12594 }
   12595 
   12596 static int
   12597 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12598 {
   12599 
   12600 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12601 }
   12602 
   12603 static int
   12604 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12605 {
   12606 
   12607 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12608 }
   12609 
   12610 /* SGMII related */
   12611 
   12612 /*
   12613  * wm_sgmii_uses_mdio
   12614  *
   12615  * Check whether the transaction is to the internal PHY or the external
   12616  * MDIO interface. Return true if it's MDIO.
   12617  */
   12618 static bool
   12619 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12620 {
   12621 	uint32_t reg;
   12622 	bool ismdio = false;
   12623 
   12624 	switch (sc->sc_type) {
   12625 	case WM_T_82575:
   12626 	case WM_T_82576:
   12627 		reg = CSR_READ(sc, WMREG_MDIC);
   12628 		ismdio = ((reg & MDIC_DEST) != 0);
   12629 		break;
   12630 	case WM_T_82580:
   12631 	case WM_T_I350:
   12632 	case WM_T_I354:
   12633 	case WM_T_I210:
   12634 	case WM_T_I211:
   12635 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12636 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12637 		break;
   12638 	default:
   12639 		break;
   12640 	}
   12641 
   12642 	return ismdio;
   12643 }
   12644 
   12645 /* Setup internal SGMII PHY for SFP */
   12646 static void
   12647 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12648 {
   12649 	uint16_t id1, id2, phyreg;
   12650 	int i, rv;
   12651 
   12652 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12653 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12654 		return;
   12655 
   12656 	for (i = 0; i < MII_NPHY; i++) {
   12657 		sc->phy.no_errprint = true;
   12658 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12659 		if (rv != 0)
   12660 			continue;
   12661 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12662 		if (rv != 0)
   12663 			continue;
   12664 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12665 			continue;
   12666 		sc->phy.no_errprint = false;
   12667 
   12668 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12669 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12670 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12671 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12672 		break;
   12673 	}
   12674 
   12675 }
   12676 
   12677 /*
   12678  * wm_sgmii_readreg:	[mii interface function]
   12679  *
   12680  *	Read a PHY register on the SGMII
   12681  * This could be handled by the PHY layer if we didn't have to lock the
   12682  * resource ...
   12683  */
   12684 static int
   12685 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12686 {
   12687 	struct wm_softc *sc = device_private(dev);
   12688 	int rv;
   12689 
   12690 	rv = sc->phy.acquire(sc);
   12691 	if (rv != 0) {
   12692 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12693 		return rv;
   12694 	}
   12695 
   12696 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12697 
   12698 	sc->phy.release(sc);
   12699 	return rv;
   12700 }
   12701 
   12702 static int
   12703 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12704 {
   12705 	struct wm_softc *sc = device_private(dev);
   12706 	uint32_t i2ccmd;
   12707 	int i, rv = 0;
   12708 
   12709 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12710 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12711 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12712 
   12713 	/* Poll the ready bit */
   12714 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12715 		delay(50);
   12716 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12717 		if (i2ccmd & I2CCMD_READY)
   12718 			break;
   12719 	}
   12720 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12721 		device_printf(dev, "I2CCMD Read did not complete\n");
   12722 		rv = ETIMEDOUT;
   12723 	}
   12724 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12725 		if (!sc->phy.no_errprint)
   12726 			device_printf(dev, "I2CCMD Error bit set\n");
   12727 		rv = EIO;
   12728 	}
   12729 
   12730 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12731 
   12732 	return rv;
   12733 }
   12734 
   12735 /*
   12736  * wm_sgmii_writereg:	[mii interface function]
   12737  *
   12738  *	Write a PHY register on the SGMII.
   12739  * This could be handled by the PHY layer if we didn't have to lock the
   12740  * resource ...
   12741  */
   12742 static int
   12743 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12744 {
   12745 	struct wm_softc *sc = device_private(dev);
   12746 	int rv;
   12747 
   12748 	rv = sc->phy.acquire(sc);
   12749 	if (rv != 0) {
   12750 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12751 		return rv;
   12752 	}
   12753 
   12754 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12755 
   12756 	sc->phy.release(sc);
   12757 
   12758 	return rv;
   12759 }
   12760 
   12761 static int
   12762 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12763 {
   12764 	struct wm_softc *sc = device_private(dev);
   12765 	uint32_t i2ccmd;
   12766 	uint16_t swapdata;
   12767 	int rv = 0;
   12768 	int i;
   12769 
   12770 	/* Swap the data bytes for the I2C interface */
   12771 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12772 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12773 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12774 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12775 
   12776 	/* Poll the ready bit */
   12777 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12778 		delay(50);
   12779 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12780 		if (i2ccmd & I2CCMD_READY)
   12781 			break;
   12782 	}
   12783 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12784 		device_printf(dev, "I2CCMD Write did not complete\n");
   12785 		rv = ETIMEDOUT;
   12786 	}
   12787 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12788 		device_printf(dev, "I2CCMD Error bit set\n");
   12789 		rv = EIO;
   12790 	}
   12791 
   12792 	return rv;
   12793 }
   12794 
   12795 /* TBI related */
   12796 
   12797 static bool
   12798 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12799 {
   12800 	bool sig;
   12801 
   12802 	sig = ctrl & CTRL_SWDPIN(1);
   12803 
   12804 	/*
   12805 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12806 	 * detect a signal, 1 if they don't.
   12807 	 */
   12808 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12809 		sig = !sig;
   12810 
   12811 	return sig;
   12812 }
   12813 
   12814 /*
   12815  * wm_tbi_mediainit:
   12816  *
   12817  *	Initialize media for use on 1000BASE-X devices.
   12818  */
   12819 static void
   12820 wm_tbi_mediainit(struct wm_softc *sc)
   12821 {
   12822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12823 	const char *sep = "";
   12824 
   12825 	if (sc->sc_type < WM_T_82543)
   12826 		sc->sc_tipg = TIPG_WM_DFLT;
   12827 	else
   12828 		sc->sc_tipg = TIPG_LG_DFLT;
   12829 
   12830 	sc->sc_tbi_serdes_anegticks = 5;
   12831 
   12832 	/* Initialize our media structures */
   12833 	sc->sc_mii.mii_ifp = ifp;
   12834 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12835 
   12836 	ifp->if_baudrate = IF_Gbps(1);
   12837 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12838 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12839 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12840 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12841 		    sc->sc_core_lock);
   12842 	} else {
   12843 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12844 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12845 	}
   12846 
   12847 	/*
   12848 	 * SWD Pins:
   12849 	 *
   12850 	 *	0 = Link LED (output)
   12851 	 *	1 = Loss Of Signal (input)
   12852 	 */
   12853 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12854 
   12855 	/* XXX Perhaps this is only for TBI */
   12856 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12857 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12858 
   12859 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12860 		sc->sc_ctrl &= ~CTRL_LRST;
   12861 
   12862 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12863 
   12864 #define	ADD(ss, mm, dd)							  \
   12865 do {									  \
   12866 	aprint_normal("%s%s", sep, ss);					  \
   12867 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12868 	sep = ", ";							  \
   12869 } while (/*CONSTCOND*/0)
   12870 
   12871 	aprint_normal_dev(sc->sc_dev, "");
   12872 
   12873 	if (sc->sc_type == WM_T_I354) {
   12874 		uint32_t status;
   12875 
   12876 		status = CSR_READ(sc, WMREG_STATUS);
   12877 		if (((status & STATUS_2P5_SKU) != 0)
   12878 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12879 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12880 		} else
   12881 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12882 	} else if (sc->sc_type == WM_T_82545) {
   12883 		/* Only 82545 is LX (XXX except SFP) */
   12884 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12885 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12886 	} else if (sc->sc_sfptype != 0) {
   12887 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12888 		switch (sc->sc_sfptype) {
   12889 		default:
   12890 		case SFF_SFP_ETH_FLAGS_1000SX:
   12891 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12892 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12893 			break;
   12894 		case SFF_SFP_ETH_FLAGS_1000LX:
   12895 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12896 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12897 			break;
   12898 		case SFF_SFP_ETH_FLAGS_1000CX:
   12899 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12900 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12901 			break;
   12902 		case SFF_SFP_ETH_FLAGS_1000T:
   12903 			ADD("1000baseT", IFM_1000_T, 0);
   12904 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12905 			break;
   12906 		case SFF_SFP_ETH_FLAGS_100FX:
   12907 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12908 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12909 			break;
   12910 		}
   12911 	} else {
   12912 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12913 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12914 	}
   12915 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12916 	aprint_normal("\n");
   12917 
   12918 #undef ADD
   12919 
   12920 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12921 }
   12922 
   12923 /*
   12924  * wm_tbi_mediachange:	[ifmedia interface function]
   12925  *
   12926  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12927  */
   12928 static int
   12929 wm_tbi_mediachange(struct ifnet *ifp)
   12930 {
   12931 	struct wm_softc *sc = ifp->if_softc;
   12932 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12933 	uint32_t status, ctrl;
   12934 	bool signal;
   12935 	int i;
   12936 
   12937 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12938 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12939 		/* XXX need some work for >= 82571 and < 82575 */
   12940 		if (sc->sc_type < WM_T_82575)
   12941 			return 0;
   12942 	}
   12943 
   12944 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12945 	    || (sc->sc_type >= WM_T_82575))
   12946 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12947 
   12948 	sc->sc_ctrl &= ~CTRL_LRST;
   12949 	sc->sc_txcw = TXCW_ANE;
   12950 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12951 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12952 	else if (ife->ifm_media & IFM_FDX)
   12953 		sc->sc_txcw |= TXCW_FD;
   12954 	else
   12955 		sc->sc_txcw |= TXCW_HD;
   12956 
   12957 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12958 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12959 
   12960 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12961 		device_xname(sc->sc_dev), sc->sc_txcw));
   12962 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12963 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12964 	CSR_WRITE_FLUSH(sc);
   12965 	delay(1000);
   12966 
   12967 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12968 	signal = wm_tbi_havesignal(sc, ctrl);
   12969 
   12970 	DPRINTF(sc, WM_DEBUG_LINK,
   12971 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12972 
   12973 	if (signal) {
   12974 		/* Have signal; wait for the link to come up. */
   12975 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12976 			delay(10000);
   12977 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12978 				break;
   12979 		}
   12980 
   12981 		DPRINTF(sc, WM_DEBUG_LINK,
   12982 		    ("%s: i = %d after waiting for link\n",
   12983 			device_xname(sc->sc_dev), i));
   12984 
   12985 		status = CSR_READ(sc, WMREG_STATUS);
   12986 		DPRINTF(sc, WM_DEBUG_LINK,
   12987 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12988 			__PRIxBIT "\n",
   12989 			device_xname(sc->sc_dev), status, STATUS_LU));
   12990 		if (status & STATUS_LU) {
   12991 			/* Link is up. */
   12992 			DPRINTF(sc, WM_DEBUG_LINK,
   12993 			    ("%s: LINK: set media -> link up %s\n",
   12994 				device_xname(sc->sc_dev),
   12995 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12996 
   12997 			/*
   12998 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12999 			 * so we should update sc->sc_ctrl
   13000 			 */
   13001 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13002 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13003 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13004 			if (status & STATUS_FD)
   13005 				sc->sc_tctl |=
   13006 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13007 			else
   13008 				sc->sc_tctl |=
   13009 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13010 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13011 				sc->sc_fcrtl |= FCRTL_XONE;
   13012 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13013 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13014 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13015 			sc->sc_tbi_linkup = 1;
   13016 		} else {
   13017 			if (i == WM_LINKUP_TIMEOUT)
   13018 				wm_check_for_link(sc);
   13019 			/* Link is down. */
   13020 			DPRINTF(sc, WM_DEBUG_LINK,
   13021 			    ("%s: LINK: set media -> link down\n",
   13022 				device_xname(sc->sc_dev)));
   13023 			sc->sc_tbi_linkup = 0;
   13024 		}
   13025 	} else {
   13026 		DPRINTF(sc, WM_DEBUG_LINK,
   13027 		    ("%s: LINK: set media -> no signal\n",
   13028 			device_xname(sc->sc_dev)));
   13029 		sc->sc_tbi_linkup = 0;
   13030 	}
   13031 
   13032 	wm_tbi_serdes_set_linkled(sc);
   13033 
   13034 	return 0;
   13035 }
   13036 
   13037 /*
   13038  * wm_tbi_mediastatus:	[ifmedia interface function]
   13039  *
   13040  *	Get the current interface media status on a 1000BASE-X device.
   13041  */
   13042 static void
   13043 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13044 {
   13045 	struct wm_softc *sc = ifp->if_softc;
   13046 	uint32_t ctrl, status;
   13047 
   13048 	ifmr->ifm_status = IFM_AVALID;
   13049 	ifmr->ifm_active = IFM_ETHER;
   13050 
   13051 	status = CSR_READ(sc, WMREG_STATUS);
   13052 	if ((status & STATUS_LU) == 0) {
   13053 		ifmr->ifm_active |= IFM_NONE;
   13054 		return;
   13055 	}
   13056 
   13057 	ifmr->ifm_status |= IFM_ACTIVE;
   13058 	/* Only 82545 is LX */
   13059 	if (sc->sc_type == WM_T_82545)
   13060 		ifmr->ifm_active |= IFM_1000_LX;
   13061 	else
   13062 		ifmr->ifm_active |= IFM_1000_SX;
   13063 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13064 		ifmr->ifm_active |= IFM_FDX;
   13065 	else
   13066 		ifmr->ifm_active |= IFM_HDX;
   13067 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13068 	if (ctrl & CTRL_RFCE)
   13069 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13070 	if (ctrl & CTRL_TFCE)
   13071 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13072 }
   13073 
   13074 /* XXX TBI only */
   13075 static int
   13076 wm_check_for_link(struct wm_softc *sc)
   13077 {
   13078 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13079 	uint32_t rxcw;
   13080 	uint32_t ctrl;
   13081 	uint32_t status;
   13082 	bool signal;
   13083 
   13084 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13085 		device_xname(sc->sc_dev), __func__));
   13086 
   13087 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13088 		/* XXX need some work for >= 82571 */
   13089 		if (sc->sc_type >= WM_T_82571) {
   13090 			sc->sc_tbi_linkup = 1;
   13091 			return 0;
   13092 		}
   13093 	}
   13094 
   13095 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13096 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13097 	status = CSR_READ(sc, WMREG_STATUS);
   13098 	signal = wm_tbi_havesignal(sc, ctrl);
   13099 
   13100 	DPRINTF(sc, WM_DEBUG_LINK,
   13101 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13102 		device_xname(sc->sc_dev), __func__, signal,
   13103 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13104 
   13105 	/*
   13106 	 * SWDPIN   LU RXCW
   13107 	 *	0    0	  0
   13108 	 *	0    0	  1	(should not happen)
   13109 	 *	0    1	  0	(should not happen)
   13110 	 *	0    1	  1	(should not happen)
   13111 	 *	1    0	  0	Disable autonego and force linkup
   13112 	 *	1    0	  1	got /C/ but not linkup yet
   13113 	 *	1    1	  0	(linkup)
   13114 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13115 	 *
   13116 	 */
   13117 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13118 		DPRINTF(sc, WM_DEBUG_LINK,
   13119 		    ("%s: %s: force linkup and fullduplex\n",
   13120 			device_xname(sc->sc_dev), __func__));
   13121 		sc->sc_tbi_linkup = 0;
   13122 		/* Disable auto-negotiation in the TXCW register */
   13123 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13124 
   13125 		/*
   13126 		 * Force link-up and also force full-duplex.
   13127 		 *
   13128 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   13129 		 * so we should update sc->sc_ctrl
   13130 		 */
   13131 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13132 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13133 	} else if (((status & STATUS_LU) != 0)
   13134 	    && ((rxcw & RXCW_C) != 0)
   13135 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13136 		sc->sc_tbi_linkup = 1;
   13137 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13138 			device_xname(sc->sc_dev), __func__));
   13139 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13140 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13141 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13142 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13143 			device_xname(sc->sc_dev), __func__));
   13144 	} else {
   13145 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13146 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13147 			status));
   13148 	}
   13149 
   13150 	return 0;
   13151 }
   13152 
   13153 /*
   13154  * wm_tbi_tick:
   13155  *
   13156  *	Check the link on TBI devices.
   13157  *	This function acts as mii_tick().
   13158  */
   13159 static void
   13160 wm_tbi_tick(struct wm_softc *sc)
   13161 {
   13162 	struct mii_data *mii = &sc->sc_mii;
   13163 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13164 	uint32_t status;
   13165 
   13166 	KASSERT(mutex_owned(sc->sc_core_lock));
   13167 
   13168 	status = CSR_READ(sc, WMREG_STATUS);
   13169 
   13170 	/* XXX is this needed? */
   13171 	(void)CSR_READ(sc, WMREG_RXCW);
   13172 	(void)CSR_READ(sc, WMREG_CTRL);
   13173 
   13174 	/* set link status */
   13175 	if ((status & STATUS_LU) == 0) {
   13176 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13177 			device_xname(sc->sc_dev)));
   13178 		sc->sc_tbi_linkup = 0;
   13179 	} else if (sc->sc_tbi_linkup == 0) {
   13180 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13181 			device_xname(sc->sc_dev),
   13182 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13183 		sc->sc_tbi_linkup = 1;
   13184 		sc->sc_tbi_serdes_ticks = 0;
   13185 	}
   13186 
   13187 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13188 		goto setled;
   13189 
   13190 	if ((status & STATUS_LU) == 0) {
   13191 		sc->sc_tbi_linkup = 0;
   13192 		/* If the timer expired, retry autonegotiation */
   13193 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13194 		    && (++sc->sc_tbi_serdes_ticks
   13195 			>= sc->sc_tbi_serdes_anegticks)) {
   13196 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13197 				device_xname(sc->sc_dev), __func__));
   13198 			sc->sc_tbi_serdes_ticks = 0;
   13199 			/*
   13200 			 * Reset the link, and let autonegotiation do
   13201 			 * its thing
   13202 			 */
   13203 			sc->sc_ctrl |= CTRL_LRST;
   13204 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13205 			CSR_WRITE_FLUSH(sc);
   13206 			delay(1000);
   13207 			sc->sc_ctrl &= ~CTRL_LRST;
   13208 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13209 			CSR_WRITE_FLUSH(sc);
   13210 			delay(1000);
   13211 			CSR_WRITE(sc, WMREG_TXCW,
   13212 			    sc->sc_txcw & ~TXCW_ANE);
   13213 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13214 		}
   13215 	}
   13216 
   13217 setled:
   13218 	wm_tbi_serdes_set_linkled(sc);
   13219 }
   13220 
   13221 /* SERDES related */
   13222 static void
   13223 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13224 {
   13225 	uint32_t reg;
   13226 
   13227 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13228 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13229 		return;
   13230 
   13231 	/* Enable PCS to turn on link */
   13232 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13233 	reg |= PCS_CFG_PCS_EN;
   13234 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13235 
   13236 	/* Power up the laser */
   13237 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13238 	reg &= ~CTRL_EXT_SWDPIN(3);
   13239 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13240 
   13241 	/* Flush the write to verify completion */
   13242 	CSR_WRITE_FLUSH(sc);
   13243 	delay(1000);
   13244 }
   13245 
   13246 static int
   13247 wm_serdes_mediachange(struct ifnet *ifp)
   13248 {
   13249 	struct wm_softc *sc = ifp->if_softc;
   13250 	bool pcs_autoneg = true; /* XXX */
   13251 	uint32_t ctrl_ext, pcs_lctl, reg;
   13252 
   13253 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13254 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13255 		return 0;
   13256 
   13257 	/* XXX Currently, this function is not called on 8257[12] */
   13258 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13259 	    || (sc->sc_type >= WM_T_82575))
   13260 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13261 
   13262 	/* Power on the sfp cage if present */
   13263 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13264 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13265 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13266 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13267 
   13268 	sc->sc_ctrl |= CTRL_SLU;
   13269 
   13270 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13271 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13272 
   13273 		reg = CSR_READ(sc, WMREG_CONNSW);
   13274 		reg |= CONNSW_ENRGSRC;
   13275 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13276 	}
   13277 
   13278 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13279 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13280 	case CTRL_EXT_LINK_MODE_SGMII:
   13281 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13282 		pcs_autoneg = true;
   13283 		/* Autoneg time out should be disabled for SGMII mode */
   13284 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13285 		break;
   13286 	case CTRL_EXT_LINK_MODE_1000KX:
   13287 		pcs_autoneg = false;
   13288 		/* FALLTHROUGH */
   13289 	default:
   13290 		if ((sc->sc_type == WM_T_82575)
   13291 		    || (sc->sc_type == WM_T_82576)) {
   13292 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13293 				pcs_autoneg = false;
   13294 		}
   13295 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13296 		    | CTRL_FRCFDX;
   13297 
   13298 		/* Set speed of 1000/Full if speed/duplex is forced */
   13299 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13300 	}
   13301 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13302 
   13303 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13304 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13305 
   13306 	if (pcs_autoneg) {
   13307 		/* Set PCS register for autoneg */
   13308 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13309 
   13310 		/* Disable force flow control for autoneg */
   13311 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13312 
   13313 		/* Configure flow control advertisement for autoneg */
   13314 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13315 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13316 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13317 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13318 	} else
   13319 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13320 
   13321 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13322 
   13323 	return 0;
   13324 }
   13325 
   13326 static void
   13327 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13328 {
   13329 	struct wm_softc *sc = ifp->if_softc;
   13330 	struct mii_data *mii = &sc->sc_mii;
   13331 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13332 	uint32_t pcs_adv, pcs_lpab, reg;
   13333 
   13334 	ifmr->ifm_status = IFM_AVALID;
   13335 	ifmr->ifm_active = IFM_ETHER;
   13336 
   13337 	/* Check PCS */
   13338 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13339 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13340 		ifmr->ifm_active |= IFM_NONE;
   13341 		sc->sc_tbi_linkup = 0;
   13342 		goto setled;
   13343 	}
   13344 
   13345 	sc->sc_tbi_linkup = 1;
   13346 	ifmr->ifm_status |= IFM_ACTIVE;
   13347 	if (sc->sc_type == WM_T_I354) {
   13348 		uint32_t status;
   13349 
   13350 		status = CSR_READ(sc, WMREG_STATUS);
   13351 		if (((status & STATUS_2P5_SKU) != 0)
   13352 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13353 			ifmr->ifm_active |= IFM_2500_KX;
   13354 		} else
   13355 			ifmr->ifm_active |= IFM_1000_KX;
   13356 	} else {
   13357 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13358 		case PCS_LSTS_SPEED_10:
   13359 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13360 			break;
   13361 		case PCS_LSTS_SPEED_100:
   13362 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13363 			break;
   13364 		case PCS_LSTS_SPEED_1000:
   13365 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13366 			break;
   13367 		default:
   13368 			device_printf(sc->sc_dev, "Unknown speed\n");
   13369 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13370 			break;
   13371 		}
   13372 	}
   13373 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13374 	if ((reg & PCS_LSTS_FDX) != 0)
   13375 		ifmr->ifm_active |= IFM_FDX;
   13376 	else
   13377 		ifmr->ifm_active |= IFM_HDX;
   13378 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13379 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13380 		/* Check flow */
   13381 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13382 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13383 			DPRINTF(sc, WM_DEBUG_LINK,
   13384 			    ("XXX LINKOK but not ACOMP\n"));
   13385 			goto setled;
   13386 		}
   13387 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13388 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13389 		DPRINTF(sc, WM_DEBUG_LINK,
   13390 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13391 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13392 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13393 			mii->mii_media_active |= IFM_FLOW
   13394 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13395 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13396 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13397 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13398 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13399 			mii->mii_media_active |= IFM_FLOW
   13400 			    | IFM_ETH_TXPAUSE;
   13401 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13402 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13403 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13404 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13405 			mii->mii_media_active |= IFM_FLOW
   13406 			    | IFM_ETH_RXPAUSE;
   13407 		}
   13408 	}
   13409 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13410 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13411 setled:
   13412 	wm_tbi_serdes_set_linkled(sc);
   13413 }
   13414 
   13415 /*
   13416  * wm_serdes_tick:
   13417  *
   13418  *	Check the link on serdes devices.
   13419  */
   13420 static void
   13421 wm_serdes_tick(struct wm_softc *sc)
   13422 {
   13423 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13424 	struct mii_data *mii = &sc->sc_mii;
   13425 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13426 	uint32_t reg;
   13427 
   13428 	KASSERT(mutex_owned(sc->sc_core_lock));
   13429 
   13430 	mii->mii_media_status = IFM_AVALID;
   13431 	mii->mii_media_active = IFM_ETHER;
   13432 
   13433 	/* Check PCS */
   13434 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13435 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13436 		mii->mii_media_status |= IFM_ACTIVE;
   13437 		sc->sc_tbi_linkup = 1;
   13438 		sc->sc_tbi_serdes_ticks = 0;
   13439 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13440 		if ((reg & PCS_LSTS_FDX) != 0)
   13441 			mii->mii_media_active |= IFM_FDX;
   13442 		else
   13443 			mii->mii_media_active |= IFM_HDX;
   13444 	} else {
   13445 		mii->mii_media_status |= IFM_NONE;
   13446 		sc->sc_tbi_linkup = 0;
   13447 		/* If the timer expired, retry autonegotiation */
   13448 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13449 		    && (++sc->sc_tbi_serdes_ticks
   13450 			>= sc->sc_tbi_serdes_anegticks)) {
   13451 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13452 				device_xname(sc->sc_dev), __func__));
   13453 			sc->sc_tbi_serdes_ticks = 0;
   13454 			/* XXX */
   13455 			wm_serdes_mediachange(ifp);
   13456 		}
   13457 	}
   13458 
   13459 	wm_tbi_serdes_set_linkled(sc);
   13460 }
   13461 
   13462 /* SFP related */
   13463 
   13464 static int
   13465 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13466 {
   13467 	uint32_t i2ccmd;
   13468 	int i;
   13469 
   13470 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13471 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13472 
   13473 	/* Poll the ready bit */
   13474 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13475 		delay(50);
   13476 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13477 		if (i2ccmd & I2CCMD_READY)
   13478 			break;
   13479 	}
   13480 	if ((i2ccmd & I2CCMD_READY) == 0)
   13481 		return -1;
   13482 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13483 		return -1;
   13484 
   13485 	*data = i2ccmd & 0x00ff;
   13486 
   13487 	return 0;
   13488 }
   13489 
   13490 static uint32_t
   13491 wm_sfp_get_media_type(struct wm_softc *sc)
   13492 {
   13493 	uint32_t ctrl_ext;
   13494 	uint8_t val = 0;
   13495 	int timeout = 3;
   13496 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13497 	int rv = -1;
   13498 
   13499 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13500 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13501 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13502 	CSR_WRITE_FLUSH(sc);
   13503 
   13504 	/* Read SFP module data */
   13505 	while (timeout) {
   13506 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13507 		if (rv == 0)
   13508 			break;
   13509 		delay(100*1000); /* XXX too big */
   13510 		timeout--;
   13511 	}
   13512 	if (rv != 0)
   13513 		goto out;
   13514 
   13515 	switch (val) {
   13516 	case SFF_SFP_ID_SFF:
   13517 		aprint_normal_dev(sc->sc_dev,
   13518 		    "Module/Connector soldered to board\n");
   13519 		break;
   13520 	case SFF_SFP_ID_SFP:
   13521 		sc->sc_flags |= WM_F_SFP;
   13522 		break;
   13523 	case SFF_SFP_ID_UNKNOWN:
   13524 		goto out;
   13525 	default:
   13526 		break;
   13527 	}
   13528 
   13529 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13530 	if (rv != 0)
   13531 		goto out;
   13532 
   13533 	sc->sc_sfptype = val;
   13534 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13535 		mediatype = WM_MEDIATYPE_SERDES;
   13536 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13537 		sc->sc_flags |= WM_F_SGMII;
   13538 		mediatype = WM_MEDIATYPE_COPPER;
   13539 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13540 		sc->sc_flags |= WM_F_SGMII;
   13541 		mediatype = WM_MEDIATYPE_SERDES;
   13542 	} else {
   13543 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13544 		    __func__, sc->sc_sfptype);
   13545 		sc->sc_sfptype = 0; /* XXX unknown */
   13546 	}
   13547 
   13548 out:
   13549 	/* Restore I2C interface setting */
   13550 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13551 
   13552 	return mediatype;
   13553 }
   13554 
   13555 /*
   13556  * NVM related.
   13557  * Microwire, SPI (w/wo EERD) and Flash.
   13558  */
   13559 
   13560 /* Both spi and uwire */
   13561 
   13562 /*
   13563  * wm_eeprom_sendbits:
   13564  *
   13565  *	Send a series of bits to the EEPROM.
   13566  */
   13567 static void
   13568 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13569 {
   13570 	uint32_t reg;
   13571 	int x;
   13572 
   13573 	reg = CSR_READ(sc, WMREG_EECD);
   13574 
   13575 	for (x = nbits; x > 0; x--) {
   13576 		if (bits & (1U << (x - 1)))
   13577 			reg |= EECD_DI;
   13578 		else
   13579 			reg &= ~EECD_DI;
   13580 		CSR_WRITE(sc, WMREG_EECD, reg);
   13581 		CSR_WRITE_FLUSH(sc);
   13582 		delay(2);
   13583 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13584 		CSR_WRITE_FLUSH(sc);
   13585 		delay(2);
   13586 		CSR_WRITE(sc, WMREG_EECD, reg);
   13587 		CSR_WRITE_FLUSH(sc);
   13588 		delay(2);
   13589 	}
   13590 }
   13591 
   13592 /*
   13593  * wm_eeprom_recvbits:
   13594  *
   13595  *	Receive a series of bits from the EEPROM.
   13596  */
   13597 static void
   13598 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13599 {
   13600 	uint32_t reg, val;
   13601 	int x;
   13602 
   13603 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13604 
   13605 	val = 0;
   13606 	for (x = nbits; x > 0; x--) {
   13607 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13608 		CSR_WRITE_FLUSH(sc);
   13609 		delay(2);
   13610 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13611 			val |= (1U << (x - 1));
   13612 		CSR_WRITE(sc, WMREG_EECD, reg);
   13613 		CSR_WRITE_FLUSH(sc);
   13614 		delay(2);
   13615 	}
   13616 	*valp = val;
   13617 }
   13618 
   13619 /* Microwire */
   13620 
   13621 /*
   13622  * wm_nvm_read_uwire:
   13623  *
   13624  *	Read a word from the EEPROM using the MicroWire protocol.
   13625  */
   13626 static int
   13627 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13628 {
   13629 	uint32_t reg, val;
   13630 	int i, rv;
   13631 
   13632 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13633 		device_xname(sc->sc_dev), __func__));
   13634 
   13635 	rv = sc->nvm.acquire(sc);
   13636 	if (rv != 0)
   13637 		return rv;
   13638 
   13639 	for (i = 0; i < wordcnt; i++) {
   13640 		/* Clear SK and DI. */
   13641 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13642 		CSR_WRITE(sc, WMREG_EECD, reg);
   13643 
   13644 		/*
   13645 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13646 		 * and Xen.
   13647 		 *
   13648 		 * We use this workaround only for 82540 because qemu's
   13649 		 * e1000 act as 82540.
   13650 		 */
   13651 		if (sc->sc_type == WM_T_82540) {
   13652 			reg |= EECD_SK;
   13653 			CSR_WRITE(sc, WMREG_EECD, reg);
   13654 			reg &= ~EECD_SK;
   13655 			CSR_WRITE(sc, WMREG_EECD, reg);
   13656 			CSR_WRITE_FLUSH(sc);
   13657 			delay(2);
   13658 		}
   13659 		/* XXX: end of workaround */
   13660 
   13661 		/* Set CHIP SELECT. */
   13662 		reg |= EECD_CS;
   13663 		CSR_WRITE(sc, WMREG_EECD, reg);
   13664 		CSR_WRITE_FLUSH(sc);
   13665 		delay(2);
   13666 
   13667 		/* Shift in the READ command. */
   13668 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13669 
   13670 		/* Shift in address. */
   13671 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13672 
   13673 		/* Shift out the data. */
   13674 		wm_eeprom_recvbits(sc, &val, 16);
   13675 		data[i] = val & 0xffff;
   13676 
   13677 		/* Clear CHIP SELECT. */
   13678 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13679 		CSR_WRITE(sc, WMREG_EECD, reg);
   13680 		CSR_WRITE_FLUSH(sc);
   13681 		delay(2);
   13682 	}
   13683 
   13684 	sc->nvm.release(sc);
   13685 	return 0;
   13686 }
   13687 
   13688 /* SPI */
   13689 
   13690 /*
   13691  * Set SPI and FLASH related information from the EECD register.
   13692  * For 82541 and 82547, the word size is taken from EEPROM.
   13693  */
   13694 static int
   13695 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13696 {
   13697 	int size;
   13698 	uint32_t reg;
   13699 	uint16_t data;
   13700 
   13701 	reg = CSR_READ(sc, WMREG_EECD);
   13702 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13703 
   13704 	/* Read the size of NVM from EECD by default */
   13705 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13706 	switch (sc->sc_type) {
   13707 	case WM_T_82541:
   13708 	case WM_T_82541_2:
   13709 	case WM_T_82547:
   13710 	case WM_T_82547_2:
   13711 		/* Set dummy value to access EEPROM */
   13712 		sc->sc_nvm_wordsize = 64;
   13713 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13714 			aprint_error_dev(sc->sc_dev,
   13715 			    "%s: failed to read EEPROM size\n", __func__);
   13716 		}
   13717 		reg = data;
   13718 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13719 		if (size == 0)
   13720 			size = 6; /* 64 word size */
   13721 		else
   13722 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13723 		break;
   13724 	case WM_T_80003:
   13725 	case WM_T_82571:
   13726 	case WM_T_82572:
   13727 	case WM_T_82573: /* SPI case */
   13728 	case WM_T_82574: /* SPI case */
   13729 	case WM_T_82583: /* SPI case */
   13730 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13731 		if (size > 14)
   13732 			size = 14;
   13733 		break;
   13734 	case WM_T_82575:
   13735 	case WM_T_82576:
   13736 	case WM_T_82580:
   13737 	case WM_T_I350:
   13738 	case WM_T_I354:
   13739 	case WM_T_I210:
   13740 	case WM_T_I211:
   13741 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13742 		if (size > 15)
   13743 			size = 15;
   13744 		break;
   13745 	default:
   13746 		aprint_error_dev(sc->sc_dev,
   13747 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13748 		return -1;
   13749 		break;
   13750 	}
   13751 
   13752 	sc->sc_nvm_wordsize = 1 << size;
   13753 
   13754 	return 0;
   13755 }
   13756 
   13757 /*
   13758  * wm_nvm_ready_spi:
   13759  *
   13760  *	Wait for a SPI EEPROM to be ready for commands.
   13761  */
   13762 static int
   13763 wm_nvm_ready_spi(struct wm_softc *sc)
   13764 {
   13765 	uint32_t val;
   13766 	int usec;
   13767 
   13768 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13769 		device_xname(sc->sc_dev), __func__));
   13770 
   13771 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13772 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13773 		wm_eeprom_recvbits(sc, &val, 8);
   13774 		if ((val & SPI_SR_RDY) == 0)
   13775 			break;
   13776 	}
   13777 	if (usec >= SPI_MAX_RETRIES) {
   13778 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   13779 		return -1;
   13780 	}
   13781 	return 0;
   13782 }
   13783 
   13784 /*
   13785  * wm_nvm_read_spi:
   13786  *
   13787  *	Read a work from the EEPROM using the SPI protocol.
   13788  */
   13789 static int
   13790 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13791 {
   13792 	uint32_t reg, val;
   13793 	int i;
   13794 	uint8_t opc;
   13795 	int rv;
   13796 
   13797 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13798 		device_xname(sc->sc_dev), __func__));
   13799 
   13800 	rv = sc->nvm.acquire(sc);
   13801 	if (rv != 0)
   13802 		return rv;
   13803 
   13804 	/* Clear SK and CS. */
   13805 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13806 	CSR_WRITE(sc, WMREG_EECD, reg);
   13807 	CSR_WRITE_FLUSH(sc);
   13808 	delay(2);
   13809 
   13810 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13811 		goto out;
   13812 
   13813 	/* Toggle CS to flush commands. */
   13814 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13815 	CSR_WRITE_FLUSH(sc);
   13816 	delay(2);
   13817 	CSR_WRITE(sc, WMREG_EECD, reg);
   13818 	CSR_WRITE_FLUSH(sc);
   13819 	delay(2);
   13820 
   13821 	opc = SPI_OPC_READ;
   13822 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13823 		opc |= SPI_OPC_A8;
   13824 
   13825 	wm_eeprom_sendbits(sc, opc, 8);
   13826 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13827 
   13828 	for (i = 0; i < wordcnt; i++) {
   13829 		wm_eeprom_recvbits(sc, &val, 16);
   13830 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13831 	}
   13832 
   13833 	/* Raise CS and clear SK. */
   13834 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13835 	CSR_WRITE(sc, WMREG_EECD, reg);
   13836 	CSR_WRITE_FLUSH(sc);
   13837 	delay(2);
   13838 
   13839 out:
   13840 	sc->nvm.release(sc);
   13841 	return rv;
   13842 }
   13843 
   13844 /* Using with EERD */
   13845 
   13846 static int
   13847 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13848 {
   13849 	uint32_t attempts = 100000;
   13850 	uint32_t i, reg = 0;
   13851 	int32_t done = -1;
   13852 
   13853 	for (i = 0; i < attempts; i++) {
   13854 		reg = CSR_READ(sc, rw);
   13855 
   13856 		if (reg & EERD_DONE) {
   13857 			done = 0;
   13858 			break;
   13859 		}
   13860 		delay(5);
   13861 	}
   13862 
   13863 	return done;
   13864 }
   13865 
   13866 static int
   13867 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13868 {
   13869 	int i, eerd = 0;
   13870 	int rv;
   13871 
   13872 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13873 		device_xname(sc->sc_dev), __func__));
   13874 
   13875 	rv = sc->nvm.acquire(sc);
   13876 	if (rv != 0)
   13877 		return rv;
   13878 
   13879 	for (i = 0; i < wordcnt; i++) {
   13880 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13881 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13882 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13883 		if (rv != 0) {
   13884 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13885 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13886 			break;
   13887 		}
   13888 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13889 	}
   13890 
   13891 	sc->nvm.release(sc);
   13892 	return rv;
   13893 }
   13894 
   13895 /* Flash */
   13896 
   13897 static int
   13898 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13899 {
   13900 	uint32_t eecd;
   13901 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13902 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13903 	uint32_t nvm_dword = 0;
   13904 	uint8_t sig_byte = 0;
   13905 	int rv;
   13906 
   13907 	switch (sc->sc_type) {
   13908 	case WM_T_PCH_SPT:
   13909 	case WM_T_PCH_CNP:
   13910 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13911 		act_offset = ICH_NVM_SIG_WORD * 2;
   13912 
   13913 		/* Set bank to 0 in case flash read fails. */
   13914 		*bank = 0;
   13915 
   13916 		/* Check bank 0 */
   13917 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13918 		if (rv != 0)
   13919 			return rv;
   13920 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13921 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13922 			*bank = 0;
   13923 			return 0;
   13924 		}
   13925 
   13926 		/* Check bank 1 */
   13927 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13928 		    &nvm_dword);
   13929 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13930 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13931 			*bank = 1;
   13932 			return 0;
   13933 		}
   13934 		aprint_error_dev(sc->sc_dev,
   13935 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13936 		return -1;
   13937 	case WM_T_ICH8:
   13938 	case WM_T_ICH9:
   13939 		eecd = CSR_READ(sc, WMREG_EECD);
   13940 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13941 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13942 			return 0;
   13943 		}
   13944 		/* FALLTHROUGH */
   13945 	default:
   13946 		/* Default to 0 */
   13947 		*bank = 0;
   13948 
   13949 		/* Check bank 0 */
   13950 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13951 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13952 			*bank = 0;
   13953 			return 0;
   13954 		}
   13955 
   13956 		/* Check bank 1 */
   13957 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13958 		    &sig_byte);
   13959 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13960 			*bank = 1;
   13961 			return 0;
   13962 		}
   13963 	}
   13964 
   13965 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13966 		device_xname(sc->sc_dev)));
   13967 	return -1;
   13968 }
   13969 
   13970 /******************************************************************************
   13971  * This function does initial flash setup so that a new read/write/erase cycle
   13972  * can be started.
   13973  *
   13974  * sc - The pointer to the hw structure
   13975  ****************************************************************************/
   13976 static int32_t
   13977 wm_ich8_cycle_init(struct wm_softc *sc)
   13978 {
   13979 	uint16_t hsfsts;
   13980 	int32_t error = 1;
   13981 	int32_t i     = 0;
   13982 
   13983 	if (sc->sc_type >= WM_T_PCH_SPT)
   13984 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13985 	else
   13986 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13987 
   13988 	/* May be check the Flash Des Valid bit in Hw status */
   13989 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13990 		return error;
   13991 
   13992 	/* Clear FCERR in Hw status by writing 1 */
   13993 	/* Clear DAEL in Hw status by writing a 1 */
   13994 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13995 
   13996 	if (sc->sc_type >= WM_T_PCH_SPT)
   13997 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13998 	else
   13999 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14000 
   14001 	/*
   14002 	 * Either we should have a hardware SPI cycle in progress bit to check
   14003 	 * against, in order to start a new cycle or FDONE bit should be
   14004 	 * changed in the hardware so that it is 1 after hardware reset, which
   14005 	 * can then be used as an indication whether a cycle is in progress or
   14006 	 * has been completed .. we should also have some software semaphore
   14007 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   14008 	 * threads access to those bits can be sequentiallized or a way so that
   14009 	 * 2 threads don't start the cycle at the same time
   14010 	 */
   14011 
   14012 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14013 		/*
   14014 		 * There is no cycle running at present, so we can start a
   14015 		 * cycle
   14016 		 */
   14017 
   14018 		/* Begin by setting Flash Cycle Done. */
   14019 		hsfsts |= HSFSTS_DONE;
   14020 		if (sc->sc_type >= WM_T_PCH_SPT)
   14021 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14022 			    hsfsts & 0xffffUL);
   14023 		else
   14024 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14025 		error = 0;
   14026 	} else {
   14027 		/*
   14028 		 * Otherwise poll for sometime so the current cycle has a
   14029 		 * chance to end before giving up.
   14030 		 */
   14031 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14032 			if (sc->sc_type >= WM_T_PCH_SPT)
   14033 				hsfsts = ICH8_FLASH_READ32(sc,
   14034 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14035 			else
   14036 				hsfsts = ICH8_FLASH_READ16(sc,
   14037 				    ICH_FLASH_HSFSTS);
   14038 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14039 				error = 0;
   14040 				break;
   14041 			}
   14042 			delay(1);
   14043 		}
   14044 		if (error == 0) {
   14045 			/*
   14046 			 * Successful in waiting for previous cycle to timeout,
   14047 			 * now set the Flash Cycle Done.
   14048 			 */
   14049 			hsfsts |= HSFSTS_DONE;
   14050 			if (sc->sc_type >= WM_T_PCH_SPT)
   14051 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14052 				    hsfsts & 0xffffUL);
   14053 			else
   14054 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14055 				    hsfsts);
   14056 		}
   14057 	}
   14058 	return error;
   14059 }
   14060 
   14061 /******************************************************************************
   14062  * This function starts a flash cycle and waits for its completion
   14063  *
   14064  * sc - The pointer to the hw structure
   14065  ****************************************************************************/
   14066 static int32_t
   14067 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14068 {
   14069 	uint16_t hsflctl;
   14070 	uint16_t hsfsts;
   14071 	int32_t error = 1;
   14072 	uint32_t i = 0;
   14073 
   14074 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14075 	if (sc->sc_type >= WM_T_PCH_SPT)
   14076 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14077 	else
   14078 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14079 	hsflctl |= HSFCTL_GO;
   14080 	if (sc->sc_type >= WM_T_PCH_SPT)
   14081 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14082 		    (uint32_t)hsflctl << 16);
   14083 	else
   14084 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14085 
   14086 	/* Wait till FDONE bit is set to 1 */
   14087 	do {
   14088 		if (sc->sc_type >= WM_T_PCH_SPT)
   14089 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14090 			    & 0xffffUL;
   14091 		else
   14092 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14093 		if (hsfsts & HSFSTS_DONE)
   14094 			break;
   14095 		delay(1);
   14096 		i++;
   14097 	} while (i < timeout);
   14098 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   14099 		error = 0;
   14100 
   14101 	return error;
   14102 }
   14103 
   14104 /******************************************************************************
   14105  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14106  *
   14107  * sc - The pointer to the hw structure
   14108  * index - The index of the byte or word to read.
   14109  * size - Size of data to read, 1=byte 2=word, 4=dword
   14110  * data - Pointer to the word to store the value read.
   14111  *****************************************************************************/
   14112 static int32_t
   14113 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14114     uint32_t size, uint32_t *data)
   14115 {
   14116 	uint16_t hsfsts;
   14117 	uint16_t hsflctl;
   14118 	uint32_t flash_linear_address;
   14119 	uint32_t flash_data = 0;
   14120 	int32_t error = 1;
   14121 	int32_t count = 0;
   14122 
   14123 	if (size < 1  || size > 4 || data == 0x0 ||
   14124 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14125 		return error;
   14126 
   14127 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14128 	    sc->sc_ich8_flash_base;
   14129 
   14130 	do {
   14131 		delay(1);
   14132 		/* Steps */
   14133 		error = wm_ich8_cycle_init(sc);
   14134 		if (error)
   14135 			break;
   14136 
   14137 		if (sc->sc_type >= WM_T_PCH_SPT)
   14138 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14139 			    >> 16;
   14140 		else
   14141 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14142 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   14143 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14144 		    & HSFCTL_BCOUNT_MASK;
   14145 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14146 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14147 			/*
   14148 			 * In SPT, This register is in Lan memory space, not
   14149 			 * flash. Therefore, only 32 bit access is supported.
   14150 			 */
   14151 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14152 			    (uint32_t)hsflctl << 16);
   14153 		} else
   14154 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14155 
   14156 		/*
   14157 		 * Write the last 24 bits of index into Flash Linear address
   14158 		 * field in Flash Address
   14159 		 */
   14160 		/* TODO: TBD maybe check the index against the size of flash */
   14161 
   14162 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14163 
   14164 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14165 
   14166 		/*
   14167 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   14168 		 * the whole sequence a few more times, else read in (shift in)
   14169 		 * the Flash Data0, the order is least significant byte first
   14170 		 * msb to lsb
   14171 		 */
   14172 		if (error == 0) {
   14173 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14174 			if (size == 1)
   14175 				*data = (uint8_t)(flash_data & 0x000000FF);
   14176 			else if (size == 2)
   14177 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14178 			else if (size == 4)
   14179 				*data = (uint32_t)flash_data;
   14180 			break;
   14181 		} else {
   14182 			/*
   14183 			 * If we've gotten here, then things are probably
   14184 			 * completely hosed, but if the error condition is
   14185 			 * detected, it won't hurt to give it another try...
   14186 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14187 			 */
   14188 			if (sc->sc_type >= WM_T_PCH_SPT)
   14189 				hsfsts = ICH8_FLASH_READ32(sc,
   14190 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14191 			else
   14192 				hsfsts = ICH8_FLASH_READ16(sc,
   14193 				    ICH_FLASH_HSFSTS);
   14194 
   14195 			if (hsfsts & HSFSTS_ERR) {
   14196 				/* Repeat for some time before giving up. */
   14197 				continue;
   14198 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14199 				break;
   14200 		}
   14201 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14202 
   14203 	return error;
   14204 }
   14205 
   14206 /******************************************************************************
   14207  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14208  *
   14209  * sc - pointer to wm_hw structure
   14210  * index - The index of the byte to read.
   14211  * data - Pointer to a byte to store the value read.
   14212  *****************************************************************************/
   14213 static int32_t
   14214 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14215 {
   14216 	int32_t status;
   14217 	uint32_t word = 0;
   14218 
   14219 	status = wm_read_ich8_data(sc, index, 1, &word);
   14220 	if (status == 0)
   14221 		*data = (uint8_t)word;
   14222 	else
   14223 		*data = 0;
   14224 
   14225 	return status;
   14226 }
   14227 
   14228 /******************************************************************************
   14229  * Reads a word from the NVM using the ICH8 flash access registers.
   14230  *
   14231  * sc - pointer to wm_hw structure
   14232  * index - The starting byte index of the word to read.
   14233  * data - Pointer to a word to store the value read.
   14234  *****************************************************************************/
   14235 static int32_t
   14236 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14237 {
   14238 	int32_t status;
   14239 	uint32_t word = 0;
   14240 
   14241 	status = wm_read_ich8_data(sc, index, 2, &word);
   14242 	if (status == 0)
   14243 		*data = (uint16_t)word;
   14244 	else
   14245 		*data = 0;
   14246 
   14247 	return status;
   14248 }
   14249 
   14250 /******************************************************************************
   14251  * Reads a dword from the NVM using the ICH8 flash access registers.
   14252  *
   14253  * sc - pointer to wm_hw structure
   14254  * index - The starting byte index of the word to read.
   14255  * data - Pointer to a word to store the value read.
   14256  *****************************************************************************/
   14257 static int32_t
   14258 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14259 {
   14260 	int32_t status;
   14261 
   14262 	status = wm_read_ich8_data(sc, index, 4, data);
   14263 	return status;
   14264 }
   14265 
   14266 /******************************************************************************
   14267  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14268  * register.
   14269  *
   14270  * sc - Struct containing variables accessed by shared code
   14271  * offset - offset of word in the EEPROM to read
   14272  * data - word read from the EEPROM
   14273  * words - number of words to read
   14274  *****************************************************************************/
   14275 static int
   14276 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14277 {
   14278 	int rv;
   14279 	uint32_t flash_bank = 0;
   14280 	uint32_t act_offset = 0;
   14281 	uint32_t bank_offset = 0;
   14282 	uint16_t word = 0;
   14283 	uint16_t i = 0;
   14284 
   14285 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14286 		device_xname(sc->sc_dev), __func__));
   14287 
   14288 	rv = sc->nvm.acquire(sc);
   14289 	if (rv != 0)
   14290 		return rv;
   14291 
   14292 	/*
   14293 	 * We need to know which is the valid flash bank.  In the event
   14294 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14295 	 * managing flash_bank. So it cannot be trusted and needs
   14296 	 * to be updated with each read.
   14297 	 */
   14298 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14299 	if (rv) {
   14300 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14301 			device_xname(sc->sc_dev)));
   14302 		flash_bank = 0;
   14303 	}
   14304 
   14305 	/*
   14306 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14307 	 * size
   14308 	 */
   14309 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14310 
   14311 	for (i = 0; i < words; i++) {
   14312 		/* The NVM part needs a byte offset, hence * 2 */
   14313 		act_offset = bank_offset + ((offset + i) * 2);
   14314 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14315 		if (rv) {
   14316 			aprint_error_dev(sc->sc_dev,
   14317 			    "%s: failed to read NVM\n", __func__);
   14318 			break;
   14319 		}
   14320 		data[i] = word;
   14321 	}
   14322 
   14323 	sc->nvm.release(sc);
   14324 	return rv;
   14325 }
   14326 
   14327 /******************************************************************************
   14328  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14329  * register.
   14330  *
   14331  * sc - Struct containing variables accessed by shared code
   14332  * offset - offset of word in the EEPROM to read
   14333  * data - word read from the EEPROM
   14334  * words - number of words to read
   14335  *****************************************************************************/
   14336 static int
   14337 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14338 {
   14339 	int	 rv;
   14340 	uint32_t flash_bank = 0;
   14341 	uint32_t act_offset = 0;
   14342 	uint32_t bank_offset = 0;
   14343 	uint32_t dword = 0;
   14344 	uint16_t i = 0;
   14345 
   14346 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14347 		device_xname(sc->sc_dev), __func__));
   14348 
   14349 	rv = sc->nvm.acquire(sc);
   14350 	if (rv != 0)
   14351 		return rv;
   14352 
   14353 	/*
   14354 	 * We need to know which is the valid flash bank.  In the event
   14355 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14356 	 * managing flash_bank. So it cannot be trusted and needs
   14357 	 * to be updated with each read.
   14358 	 */
   14359 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14360 	if (rv) {
   14361 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14362 			device_xname(sc->sc_dev)));
   14363 		flash_bank = 0;
   14364 	}
   14365 
   14366 	/*
   14367 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14368 	 * size
   14369 	 */
   14370 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14371 
   14372 	for (i = 0; i < words; i++) {
   14373 		/* The NVM part needs a byte offset, hence * 2 */
   14374 		act_offset = bank_offset + ((offset + i) * 2);
   14375 		/* but we must read dword aligned, so mask ... */
   14376 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14377 		if (rv) {
   14378 			aprint_error_dev(sc->sc_dev,
   14379 			    "%s: failed to read NVM\n", __func__);
   14380 			break;
   14381 		}
   14382 		/* ... and pick out low or high word */
   14383 		if ((act_offset & 0x2) == 0)
   14384 			data[i] = (uint16_t)(dword & 0xFFFF);
   14385 		else
   14386 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14387 	}
   14388 
   14389 	sc->nvm.release(sc);
   14390 	return rv;
   14391 }
   14392 
   14393 /* iNVM */
   14394 
   14395 static int
   14396 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14397 {
   14398 	int32_t	 rv = 0;
   14399 	uint32_t invm_dword;
   14400 	uint16_t i;
   14401 	uint8_t record_type, word_address;
   14402 
   14403 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14404 		device_xname(sc->sc_dev), __func__));
   14405 
   14406 	for (i = 0; i < INVM_SIZE; i++) {
   14407 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14408 		/* Get record type */
   14409 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14410 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14411 			break;
   14412 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14413 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14414 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14415 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14416 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14417 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14418 			if (word_address == address) {
   14419 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14420 				rv = 0;
   14421 				break;
   14422 			}
   14423 		}
   14424 	}
   14425 
   14426 	return rv;
   14427 }
   14428 
   14429 static int
   14430 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14431 {
   14432 	int i, rv;
   14433 
   14434 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14435 		device_xname(sc->sc_dev), __func__));
   14436 
   14437 	rv = sc->nvm.acquire(sc);
   14438 	if (rv != 0)
   14439 		return rv;
   14440 
   14441 	for (i = 0; i < words; i++) {
   14442 		switch (offset + i) {
   14443 		case NVM_OFF_MACADDR:
   14444 		case NVM_OFF_MACADDR1:
   14445 		case NVM_OFF_MACADDR2:
   14446 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14447 			if (rv != 0) {
   14448 				data[i] = 0xffff;
   14449 				rv = -1;
   14450 			}
   14451 			break;
   14452 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14453 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14454 			if (rv != 0) {
   14455 				*data = INVM_DEFAULT_AL;
   14456 				rv = 0;
   14457 			}
   14458 			break;
   14459 		case NVM_OFF_CFG2:
   14460 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14461 			if (rv != 0) {
   14462 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14463 				rv = 0;
   14464 			}
   14465 			break;
   14466 		case NVM_OFF_CFG4:
   14467 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14468 			if (rv != 0) {
   14469 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14470 				rv = 0;
   14471 			}
   14472 			break;
   14473 		case NVM_OFF_LED_1_CFG:
   14474 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14475 			if (rv != 0) {
   14476 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14477 				rv = 0;
   14478 			}
   14479 			break;
   14480 		case NVM_OFF_LED_0_2_CFG:
   14481 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14482 			if (rv != 0) {
   14483 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14484 				rv = 0;
   14485 			}
   14486 			break;
   14487 		case NVM_OFF_ID_LED_SETTINGS:
   14488 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14489 			if (rv != 0) {
   14490 				*data = ID_LED_RESERVED_FFFF;
   14491 				rv = 0;
   14492 			}
   14493 			break;
   14494 		default:
   14495 			DPRINTF(sc, WM_DEBUG_NVM,
   14496 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14497 			*data = NVM_RESERVED_WORD;
   14498 			break;
   14499 		}
   14500 	}
   14501 
   14502 	sc->nvm.release(sc);
   14503 	return rv;
   14504 }
   14505 
   14506 /* Lock, detecting NVM type, validate checksum, version and read */
   14507 
   14508 static int
   14509 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14510 {
   14511 	uint32_t eecd = 0;
   14512 
   14513 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14514 	    || sc->sc_type == WM_T_82583) {
   14515 		eecd = CSR_READ(sc, WMREG_EECD);
   14516 
   14517 		/* Isolate bits 15 & 16 */
   14518 		eecd = ((eecd >> 15) & 0x03);
   14519 
   14520 		/* If both bits are set, device is Flash type */
   14521 		if (eecd == 0x03)
   14522 			return 0;
   14523 	}
   14524 	return 1;
   14525 }
   14526 
   14527 static int
   14528 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14529 {
   14530 	uint32_t eec;
   14531 
   14532 	eec = CSR_READ(sc, WMREG_EEC);
   14533 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14534 		return 1;
   14535 
   14536 	return 0;
   14537 }
   14538 
   14539 /*
   14540  * wm_nvm_validate_checksum
   14541  *
   14542  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14543  */
   14544 static int
   14545 wm_nvm_validate_checksum(struct wm_softc *sc)
   14546 {
   14547 	uint16_t checksum;
   14548 	uint16_t eeprom_data;
   14549 #ifdef WM_DEBUG
   14550 	uint16_t csum_wordaddr, valid_checksum;
   14551 #endif
   14552 	int i;
   14553 
   14554 	checksum = 0;
   14555 
   14556 	/* Don't check for I211 */
   14557 	if (sc->sc_type == WM_T_I211)
   14558 		return 0;
   14559 
   14560 #ifdef WM_DEBUG
   14561 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14562 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14563 		csum_wordaddr = NVM_OFF_COMPAT;
   14564 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14565 	} else {
   14566 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14567 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14568 	}
   14569 
   14570 	/* Dump EEPROM image for debug */
   14571 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14572 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14573 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14574 		/* XXX PCH_SPT? */
   14575 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14576 		if ((eeprom_data & valid_checksum) == 0)
   14577 			DPRINTF(sc, WM_DEBUG_NVM,
   14578 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14579 				device_xname(sc->sc_dev), eeprom_data,
   14580 				valid_checksum));
   14581 	}
   14582 
   14583 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14584 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14585 		for (i = 0; i < NVM_SIZE; i++) {
   14586 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14587 				printf("XXXX ");
   14588 			else
   14589 				printf("%04hx ", eeprom_data);
   14590 			if (i % 8 == 7)
   14591 				printf("\n");
   14592 		}
   14593 	}
   14594 
   14595 #endif /* WM_DEBUG */
   14596 
   14597 	for (i = 0; i < NVM_SIZE; i++) {
   14598 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14599 			return -1;
   14600 		checksum += eeprom_data;
   14601 	}
   14602 
   14603 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14604 #ifdef WM_DEBUG
   14605 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14606 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14607 #endif
   14608 	}
   14609 
   14610 	return 0;
   14611 }
   14612 
   14613 static void
   14614 wm_nvm_version_invm(struct wm_softc *sc)
   14615 {
   14616 	uint32_t dword;
   14617 
   14618 	/*
   14619 	 * Linux's code to decode version is very strange, so we don't
   14620 	 * obey that algorithm and just use word 61 as the document.
   14621 	 * Perhaps it's not perfect though...
   14622 	 *
   14623 	 * Example:
   14624 	 *
   14625 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14626 	 */
   14627 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14628 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14629 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14630 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14631 }
   14632 
   14633 static void
   14634 wm_nvm_version(struct wm_softc *sc)
   14635 {
   14636 	uint16_t major, minor, build, patch;
   14637 	uint16_t uid0, uid1;
   14638 	uint16_t nvm_data;
   14639 	uint16_t off;
   14640 	bool check_version = false;
   14641 	bool check_optionrom = false;
   14642 	bool have_build = false;
   14643 	bool have_uid = true;
   14644 
   14645 	/*
   14646 	 * Version format:
   14647 	 *
   14648 	 * XYYZ
   14649 	 * X0YZ
   14650 	 * X0YY
   14651 	 *
   14652 	 * Example:
   14653 	 *
   14654 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14655 	 *	82571	0x50a6	5.10.6?
   14656 	 *	82572	0x506a	5.6.10?
   14657 	 *	82572EI	0x5069	5.6.9?
   14658 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14659 	 *		0x2013	2.1.3?
   14660 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14661 	 * ICH8+82567	0x0040	0.4.0?
   14662 	 * ICH9+82566	0x1040	1.4.0?
   14663 	 *ICH10+82567	0x0043	0.4.3?
   14664 	 *  PCH+82577	0x00c1	0.12.1?
   14665 	 * PCH2+82579	0x00d3	0.13.3?
   14666 	 *		0x00d4	0.13.4?
   14667 	 *  LPT+I218	0x0023	0.2.3?
   14668 	 *  SPT+I219	0x0084	0.8.4?
   14669 	 *  CNP+I219	0x0054	0.5.4?
   14670 	 */
   14671 
   14672 	/*
   14673 	 * XXX
   14674 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14675 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14676 	 */
   14677 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14678 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14679 		have_uid = false;
   14680 
   14681 	switch (sc->sc_type) {
   14682 	case WM_T_82571:
   14683 	case WM_T_82572:
   14684 	case WM_T_82574:
   14685 	case WM_T_82583:
   14686 		check_version = true;
   14687 		check_optionrom = true;
   14688 		have_build = true;
   14689 		break;
   14690 	case WM_T_ICH8:
   14691 	case WM_T_ICH9:
   14692 	case WM_T_ICH10:
   14693 	case WM_T_PCH:
   14694 	case WM_T_PCH2:
   14695 	case WM_T_PCH_LPT:
   14696 	case WM_T_PCH_SPT:
   14697 	case WM_T_PCH_CNP:
   14698 		check_version = true;
   14699 		have_build = true;
   14700 		have_uid = false;
   14701 		break;
   14702 	case WM_T_82575:
   14703 	case WM_T_82576:
   14704 	case WM_T_82580:
   14705 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14706 			check_version = true;
   14707 		break;
   14708 	case WM_T_I211:
   14709 		wm_nvm_version_invm(sc);
   14710 		have_uid = false;
   14711 		goto printver;
   14712 	case WM_T_I210:
   14713 		if (!wm_nvm_flash_presence_i210(sc)) {
   14714 			wm_nvm_version_invm(sc);
   14715 			have_uid = false;
   14716 			goto printver;
   14717 		}
   14718 		/* FALLTHROUGH */
   14719 	case WM_T_I350:
   14720 	case WM_T_I354:
   14721 		check_version = true;
   14722 		check_optionrom = true;
   14723 		break;
   14724 	default:
   14725 		return;
   14726 	}
   14727 	if (check_version
   14728 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14729 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14730 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14731 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14732 			build = nvm_data & NVM_BUILD_MASK;
   14733 			have_build = true;
   14734 		} else
   14735 			minor = nvm_data & 0x00ff;
   14736 
   14737 		/* Decimal */
   14738 		minor = (minor / 16) * 10 + (minor % 16);
   14739 		sc->sc_nvm_ver_major = major;
   14740 		sc->sc_nvm_ver_minor = minor;
   14741 
   14742 printver:
   14743 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14744 		    sc->sc_nvm_ver_minor);
   14745 		if (have_build) {
   14746 			sc->sc_nvm_ver_build = build;
   14747 			aprint_verbose(".%d", build);
   14748 		}
   14749 	}
   14750 
   14751 	/* Assume the Option ROM area is at avove NVM_SIZE */
   14752 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14753 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14754 		/* Option ROM Version */
   14755 		if ((off != 0x0000) && (off != 0xffff)) {
   14756 			int rv;
   14757 
   14758 			off += NVM_COMBO_VER_OFF;
   14759 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14760 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14761 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14762 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14763 				/* 16bits */
   14764 				major = uid0 >> 8;
   14765 				build = (uid0 << 8) | (uid1 >> 8);
   14766 				patch = uid1 & 0x00ff;
   14767 				aprint_verbose(", option ROM Version %d.%d.%d",
   14768 				    major, build, patch);
   14769 			}
   14770 		}
   14771 	}
   14772 
   14773 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14774 		aprint_verbose(", Image Unique ID %08x",
   14775 		    ((uint32_t)uid1 << 16) | uid0);
   14776 }
   14777 
   14778 /*
   14779  * wm_nvm_read:
   14780  *
   14781  *	Read data from the serial EEPROM.
   14782  */
   14783 static int
   14784 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14785 {
   14786 	int rv;
   14787 
   14788 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14789 		device_xname(sc->sc_dev), __func__));
   14790 
   14791 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14792 		return -1;
   14793 
   14794 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14795 
   14796 	return rv;
   14797 }
   14798 
   14799 /*
   14800  * Hardware semaphores.
   14801  * Very complexed...
   14802  */
   14803 
   14804 static int
   14805 wm_get_null(struct wm_softc *sc)
   14806 {
   14807 
   14808 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14809 		device_xname(sc->sc_dev), __func__));
   14810 	return 0;
   14811 }
   14812 
   14813 static void
   14814 wm_put_null(struct wm_softc *sc)
   14815 {
   14816 
   14817 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14818 		device_xname(sc->sc_dev), __func__));
   14819 	return;
   14820 }
   14821 
   14822 static int
   14823 wm_get_eecd(struct wm_softc *sc)
   14824 {
   14825 	uint32_t reg;
   14826 	int x;
   14827 
   14828 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14829 		device_xname(sc->sc_dev), __func__));
   14830 
   14831 	reg = CSR_READ(sc, WMREG_EECD);
   14832 
   14833 	/* Request EEPROM access. */
   14834 	reg |= EECD_EE_REQ;
   14835 	CSR_WRITE(sc, WMREG_EECD, reg);
   14836 
   14837 	/* ..and wait for it to be granted. */
   14838 	for (x = 0; x < 1000; x++) {
   14839 		reg = CSR_READ(sc, WMREG_EECD);
   14840 		if (reg & EECD_EE_GNT)
   14841 			break;
   14842 		delay(5);
   14843 	}
   14844 	if ((reg & EECD_EE_GNT) == 0) {
   14845 		aprint_error_dev(sc->sc_dev,
   14846 		    "could not acquire EEPROM GNT\n");
   14847 		reg &= ~EECD_EE_REQ;
   14848 		CSR_WRITE(sc, WMREG_EECD, reg);
   14849 		return -1;
   14850 	}
   14851 
   14852 	return 0;
   14853 }
   14854 
   14855 static void
   14856 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14857 {
   14858 
   14859 	*eecd |= EECD_SK;
   14860 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14861 	CSR_WRITE_FLUSH(sc);
   14862 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14863 		delay(1);
   14864 	else
   14865 		delay(50);
   14866 }
   14867 
   14868 static void
   14869 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14870 {
   14871 
   14872 	*eecd &= ~EECD_SK;
   14873 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14874 	CSR_WRITE_FLUSH(sc);
   14875 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14876 		delay(1);
   14877 	else
   14878 		delay(50);
   14879 }
   14880 
   14881 static void
   14882 wm_put_eecd(struct wm_softc *sc)
   14883 {
   14884 	uint32_t reg;
   14885 
   14886 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14887 		device_xname(sc->sc_dev), __func__));
   14888 
   14889 	/* Stop nvm */
   14890 	reg = CSR_READ(sc, WMREG_EECD);
   14891 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14892 		/* Pull CS high */
   14893 		reg |= EECD_CS;
   14894 		wm_nvm_eec_clock_lower(sc, &reg);
   14895 	} else {
   14896 		/* CS on Microwire is active-high */
   14897 		reg &= ~(EECD_CS | EECD_DI);
   14898 		CSR_WRITE(sc, WMREG_EECD, reg);
   14899 		wm_nvm_eec_clock_raise(sc, &reg);
   14900 		wm_nvm_eec_clock_lower(sc, &reg);
   14901 	}
   14902 
   14903 	reg = CSR_READ(sc, WMREG_EECD);
   14904 	reg &= ~EECD_EE_REQ;
   14905 	CSR_WRITE(sc, WMREG_EECD, reg);
   14906 
   14907 	return;
   14908 }
   14909 
   14910 /*
   14911  * Get hardware semaphore.
   14912  * Same as e1000_get_hw_semaphore_generic()
   14913  */
   14914 static int
   14915 wm_get_swsm_semaphore(struct wm_softc *sc)
   14916 {
   14917 	int32_t timeout;
   14918 	uint32_t swsm;
   14919 
   14920 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14921 		device_xname(sc->sc_dev), __func__));
   14922 	KASSERT(sc->sc_nvm_wordsize > 0);
   14923 
   14924 retry:
   14925 	/* Get the SW semaphore. */
   14926 	timeout = sc->sc_nvm_wordsize + 1;
   14927 	while (timeout) {
   14928 		swsm = CSR_READ(sc, WMREG_SWSM);
   14929 
   14930 		if ((swsm & SWSM_SMBI) == 0)
   14931 			break;
   14932 
   14933 		delay(50);
   14934 		timeout--;
   14935 	}
   14936 
   14937 	if (timeout == 0) {
   14938 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14939 			/*
   14940 			 * In rare circumstances, the SW semaphore may already
   14941 			 * be held unintentionally. Clear the semaphore once
   14942 			 * before giving up.
   14943 			 */
   14944 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14945 			wm_put_swsm_semaphore(sc);
   14946 			goto retry;
   14947 		}
   14948 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14949 		return -1;
   14950 	}
   14951 
   14952 	/* Get the FW semaphore. */
   14953 	timeout = sc->sc_nvm_wordsize + 1;
   14954 	while (timeout) {
   14955 		swsm = CSR_READ(sc, WMREG_SWSM);
   14956 		swsm |= SWSM_SWESMBI;
   14957 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14958 		/* If we managed to set the bit we got the semaphore. */
   14959 		swsm = CSR_READ(sc, WMREG_SWSM);
   14960 		if (swsm & SWSM_SWESMBI)
   14961 			break;
   14962 
   14963 		delay(50);
   14964 		timeout--;
   14965 	}
   14966 
   14967 	if (timeout == 0) {
   14968 		aprint_error_dev(sc->sc_dev,
   14969 		    "could not acquire SWSM SWESMBI\n");
   14970 		/* Release semaphores */
   14971 		wm_put_swsm_semaphore(sc);
   14972 		return -1;
   14973 	}
   14974 	return 0;
   14975 }
   14976 
   14977 /*
   14978  * Put hardware semaphore.
   14979  * Same as e1000_put_hw_semaphore_generic()
   14980  */
   14981 static void
   14982 wm_put_swsm_semaphore(struct wm_softc *sc)
   14983 {
   14984 	uint32_t swsm;
   14985 
   14986 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14987 		device_xname(sc->sc_dev), __func__));
   14988 
   14989 	swsm = CSR_READ(sc, WMREG_SWSM);
   14990 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14991 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14992 }
   14993 
   14994 /*
   14995  * Get SW/FW semaphore.
   14996  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14997  */
   14998 static int
   14999 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15000 {
   15001 	uint32_t swfw_sync;
   15002 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15003 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15004 	int timeout;
   15005 
   15006 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15007 		device_xname(sc->sc_dev), __func__));
   15008 
   15009 	if (sc->sc_type == WM_T_80003)
   15010 		timeout = 50;
   15011 	else
   15012 		timeout = 200;
   15013 
   15014 	while (timeout) {
   15015 		if (wm_get_swsm_semaphore(sc)) {
   15016 			aprint_error_dev(sc->sc_dev,
   15017 			    "%s: failed to get semaphore\n",
   15018 			    __func__);
   15019 			return -1;
   15020 		}
   15021 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15022 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15023 			swfw_sync |= swmask;
   15024 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15025 			wm_put_swsm_semaphore(sc);
   15026 			return 0;
   15027 		}
   15028 		wm_put_swsm_semaphore(sc);
   15029 		delay(5000);
   15030 		timeout--;
   15031 	}
   15032 	device_printf(sc->sc_dev,
   15033 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15034 	    mask, swfw_sync);
   15035 	return -1;
   15036 }
   15037 
   15038 static void
   15039 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15040 {
   15041 	uint32_t swfw_sync;
   15042 
   15043 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15044 		device_xname(sc->sc_dev), __func__));
   15045 
   15046 	while (wm_get_swsm_semaphore(sc) != 0)
   15047 		continue;
   15048 
   15049 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15050 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15051 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15052 
   15053 	wm_put_swsm_semaphore(sc);
   15054 }
   15055 
   15056 static int
   15057 wm_get_nvm_80003(struct wm_softc *sc)
   15058 {
   15059 	int rv;
   15060 
   15061 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15062 		device_xname(sc->sc_dev), __func__));
   15063 
   15064 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15065 		aprint_error_dev(sc->sc_dev,
   15066 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15067 		return rv;
   15068 	}
   15069 
   15070 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15071 	    && (rv = wm_get_eecd(sc)) != 0) {
   15072 		aprint_error_dev(sc->sc_dev,
   15073 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15074 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15075 		return rv;
   15076 	}
   15077 
   15078 	return 0;
   15079 }
   15080 
   15081 static void
   15082 wm_put_nvm_80003(struct wm_softc *sc)
   15083 {
   15084 
   15085 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15086 		device_xname(sc->sc_dev), __func__));
   15087 
   15088 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15089 		wm_put_eecd(sc);
   15090 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15091 }
   15092 
   15093 static int
   15094 wm_get_nvm_82571(struct wm_softc *sc)
   15095 {
   15096 	int rv;
   15097 
   15098 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15099 		device_xname(sc->sc_dev), __func__));
   15100 
   15101 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15102 		return rv;
   15103 
   15104 	switch (sc->sc_type) {
   15105 	case WM_T_82573:
   15106 		break;
   15107 	default:
   15108 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15109 			rv = wm_get_eecd(sc);
   15110 		break;
   15111 	}
   15112 
   15113 	if (rv != 0) {
   15114 		aprint_error_dev(sc->sc_dev,
   15115 		    "%s: failed to get semaphore\n",
   15116 		    __func__);
   15117 		wm_put_swsm_semaphore(sc);
   15118 	}
   15119 
   15120 	return rv;
   15121 }
   15122 
   15123 static void
   15124 wm_put_nvm_82571(struct wm_softc *sc)
   15125 {
   15126 
   15127 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15128 		device_xname(sc->sc_dev), __func__));
   15129 
   15130 	switch (sc->sc_type) {
   15131 	case WM_T_82573:
   15132 		break;
   15133 	default:
   15134 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15135 			wm_put_eecd(sc);
   15136 		break;
   15137 	}
   15138 
   15139 	wm_put_swsm_semaphore(sc);
   15140 }
   15141 
   15142 static int
   15143 wm_get_phy_82575(struct wm_softc *sc)
   15144 {
   15145 
   15146 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15147 		device_xname(sc->sc_dev), __func__));
   15148 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15149 }
   15150 
   15151 static void
   15152 wm_put_phy_82575(struct wm_softc *sc)
   15153 {
   15154 
   15155 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15156 		device_xname(sc->sc_dev), __func__));
   15157 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15158 }
   15159 
   15160 static int
   15161 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15162 {
   15163 	uint32_t ext_ctrl;
   15164 	int timeout = 200;
   15165 
   15166 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15167 		device_xname(sc->sc_dev), __func__));
   15168 
   15169 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15170 	for (timeout = 0; timeout < 200; timeout++) {
   15171 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15172 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15173 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15174 
   15175 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15176 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15177 			return 0;
   15178 		delay(5000);
   15179 	}
   15180 	device_printf(sc->sc_dev,
   15181 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15182 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15183 	return -1;
   15184 }
   15185 
   15186 static void
   15187 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15188 {
   15189 	uint32_t ext_ctrl;
   15190 
   15191 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15192 		device_xname(sc->sc_dev), __func__));
   15193 
   15194 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15195 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15196 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15197 
   15198 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15199 }
   15200 
   15201 static int
   15202 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15203 {
   15204 	uint32_t ext_ctrl;
   15205 	int timeout;
   15206 
   15207 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15208 		device_xname(sc->sc_dev), __func__));
   15209 	mutex_enter(sc->sc_ich_phymtx);
   15210 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15211 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15212 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15213 			break;
   15214 		delay(1000);
   15215 	}
   15216 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15217 		device_printf(sc->sc_dev,
   15218 		    "SW has already locked the resource\n");
   15219 		goto out;
   15220 	}
   15221 
   15222 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15223 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15224 	for (timeout = 0; timeout < 1000; timeout++) {
   15225 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15226 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15227 			break;
   15228 		delay(1000);
   15229 	}
   15230 	if (timeout >= 1000) {
   15231 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15232 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15233 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15234 		goto out;
   15235 	}
   15236 	return 0;
   15237 
   15238 out:
   15239 	mutex_exit(sc->sc_ich_phymtx);
   15240 	return -1;
   15241 }
   15242 
   15243 static void
   15244 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15245 {
   15246 	uint32_t ext_ctrl;
   15247 
   15248 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15249 		device_xname(sc->sc_dev), __func__));
   15250 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15251 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15252 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15253 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15254 	} else
   15255 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15256 
   15257 	mutex_exit(sc->sc_ich_phymtx);
   15258 }
   15259 
   15260 static int
   15261 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15262 {
   15263 
   15264 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15265 		device_xname(sc->sc_dev), __func__));
   15266 	mutex_enter(sc->sc_ich_nvmmtx);
   15267 
   15268 	return 0;
   15269 }
   15270 
   15271 static void
   15272 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15273 {
   15274 
   15275 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15276 		device_xname(sc->sc_dev), __func__));
   15277 	mutex_exit(sc->sc_ich_nvmmtx);
   15278 }
   15279 
   15280 static int
   15281 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15282 {
   15283 	int i = 0;
   15284 	uint32_t reg;
   15285 
   15286 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15287 		device_xname(sc->sc_dev), __func__));
   15288 
   15289 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15290 	do {
   15291 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15292 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15293 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15294 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15295 			break;
   15296 		delay(2*1000);
   15297 		i++;
   15298 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15299 
   15300 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15301 		wm_put_hw_semaphore_82573(sc);
   15302 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15303 		    device_xname(sc->sc_dev));
   15304 		return -1;
   15305 	}
   15306 
   15307 	return 0;
   15308 }
   15309 
   15310 static void
   15311 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15312 {
   15313 	uint32_t reg;
   15314 
   15315 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15316 		device_xname(sc->sc_dev), __func__));
   15317 
   15318 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15319 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15320 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15321 }
   15322 
   15323 /*
   15324  * Management mode and power management related subroutines.
   15325  * BMC, AMT, suspend/resume and EEE.
   15326  */
   15327 
   15328 #ifdef WM_WOL
   15329 static int
   15330 wm_check_mng_mode(struct wm_softc *sc)
   15331 {
   15332 	int rv;
   15333 
   15334 	switch (sc->sc_type) {
   15335 	case WM_T_ICH8:
   15336 	case WM_T_ICH9:
   15337 	case WM_T_ICH10:
   15338 	case WM_T_PCH:
   15339 	case WM_T_PCH2:
   15340 	case WM_T_PCH_LPT:
   15341 	case WM_T_PCH_SPT:
   15342 	case WM_T_PCH_CNP:
   15343 		rv = wm_check_mng_mode_ich8lan(sc);
   15344 		break;
   15345 	case WM_T_82574:
   15346 	case WM_T_82583:
   15347 		rv = wm_check_mng_mode_82574(sc);
   15348 		break;
   15349 	case WM_T_82571:
   15350 	case WM_T_82572:
   15351 	case WM_T_82573:
   15352 	case WM_T_80003:
   15353 		rv = wm_check_mng_mode_generic(sc);
   15354 		break;
   15355 	default:
   15356 		/* Noting to do */
   15357 		rv = 0;
   15358 		break;
   15359 	}
   15360 
   15361 	return rv;
   15362 }
   15363 
   15364 static int
   15365 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15366 {
   15367 	uint32_t fwsm;
   15368 
   15369 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15370 
   15371 	if (((fwsm & FWSM_FW_VALID) != 0)
   15372 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15373 		return 1;
   15374 
   15375 	return 0;
   15376 }
   15377 
   15378 static int
   15379 wm_check_mng_mode_82574(struct wm_softc *sc)
   15380 {
   15381 	uint16_t data;
   15382 
   15383 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15384 
   15385 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15386 		return 1;
   15387 
   15388 	return 0;
   15389 }
   15390 
   15391 static int
   15392 wm_check_mng_mode_generic(struct wm_softc *sc)
   15393 {
   15394 	uint32_t fwsm;
   15395 
   15396 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15397 
   15398 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15399 		return 1;
   15400 
   15401 	return 0;
   15402 }
   15403 #endif /* WM_WOL */
   15404 
   15405 static int
   15406 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15407 {
   15408 	uint32_t manc, fwsm, factps;
   15409 
   15410 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15411 		return 0;
   15412 
   15413 	manc = CSR_READ(sc, WMREG_MANC);
   15414 
   15415 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15416 		device_xname(sc->sc_dev), manc));
   15417 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15418 		return 0;
   15419 
   15420 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15421 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15422 		factps = CSR_READ(sc, WMREG_FACTPS);
   15423 		if (((factps & FACTPS_MNGCG) == 0)
   15424 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15425 			return 1;
   15426 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15427 		uint16_t data;
   15428 
   15429 		factps = CSR_READ(sc, WMREG_FACTPS);
   15430 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15431 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15432 			device_xname(sc->sc_dev), factps, data));
   15433 		if (((factps & FACTPS_MNGCG) == 0)
   15434 		    && ((data & NVM_CFG2_MNGM_MASK)
   15435 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15436 			return 1;
   15437 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15438 	    && ((manc & MANC_ASF_EN) == 0))
   15439 		return 1;
   15440 
   15441 	return 0;
   15442 }
   15443 
   15444 static bool
   15445 wm_phy_resetisblocked(struct wm_softc *sc)
   15446 {
   15447 	bool blocked = false;
   15448 	uint32_t reg;
   15449 	int i = 0;
   15450 
   15451 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15452 		device_xname(sc->sc_dev), __func__));
   15453 
   15454 	switch (sc->sc_type) {
   15455 	case WM_T_ICH8:
   15456 	case WM_T_ICH9:
   15457 	case WM_T_ICH10:
   15458 	case WM_T_PCH:
   15459 	case WM_T_PCH2:
   15460 	case WM_T_PCH_LPT:
   15461 	case WM_T_PCH_SPT:
   15462 	case WM_T_PCH_CNP:
   15463 		do {
   15464 			reg = CSR_READ(sc, WMREG_FWSM);
   15465 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15466 				blocked = true;
   15467 				delay(10*1000);
   15468 				continue;
   15469 			}
   15470 			blocked = false;
   15471 		} while (blocked && (i++ < 30));
   15472 		return blocked;
   15473 		break;
   15474 	case WM_T_82571:
   15475 	case WM_T_82572:
   15476 	case WM_T_82573:
   15477 	case WM_T_82574:
   15478 	case WM_T_82583:
   15479 	case WM_T_80003:
   15480 		reg = CSR_READ(sc, WMREG_MANC);
   15481 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15482 			return true;
   15483 		else
   15484 			return false;
   15485 		break;
   15486 	default:
   15487 		/* No problem */
   15488 		break;
   15489 	}
   15490 
   15491 	return false;
   15492 }
   15493 
   15494 static void
   15495 wm_get_hw_control(struct wm_softc *sc)
   15496 {
   15497 	uint32_t reg;
   15498 
   15499 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15500 		device_xname(sc->sc_dev), __func__));
   15501 
   15502 	if (sc->sc_type == WM_T_82573) {
   15503 		reg = CSR_READ(sc, WMREG_SWSM);
   15504 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15505 	} else if (sc->sc_type >= WM_T_82571) {
   15506 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15507 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15508 	}
   15509 }
   15510 
   15511 static void
   15512 wm_release_hw_control(struct wm_softc *sc)
   15513 {
   15514 	uint32_t reg;
   15515 
   15516 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15517 		device_xname(sc->sc_dev), __func__));
   15518 
   15519 	if (sc->sc_type == WM_T_82573) {
   15520 		reg = CSR_READ(sc, WMREG_SWSM);
   15521 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15522 	} else if (sc->sc_type >= WM_T_82571) {
   15523 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15524 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15525 	}
   15526 }
   15527 
   15528 static void
   15529 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15530 {
   15531 	uint32_t reg;
   15532 
   15533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15534 		device_xname(sc->sc_dev), __func__));
   15535 
   15536 	if (sc->sc_type < WM_T_PCH2)
   15537 		return;
   15538 
   15539 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15540 
   15541 	if (gate)
   15542 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15543 	else
   15544 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15545 
   15546 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15547 }
   15548 
   15549 static int
   15550 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15551 {
   15552 	uint32_t fwsm, reg;
   15553 	int rv;
   15554 
   15555 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15556 		device_xname(sc->sc_dev), __func__));
   15557 
   15558 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15559 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15560 
   15561 	/* Disable ULP */
   15562 	wm_ulp_disable(sc);
   15563 
   15564 	/* Acquire PHY semaphore */
   15565 	rv = sc->phy.acquire(sc);
   15566 	if (rv != 0) {
   15567 		DPRINTF(sc, WM_DEBUG_INIT,
   15568 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15569 		return rv;
   15570 	}
   15571 
   15572 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15573 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15574 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15575 	 */
   15576 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15577 	switch (sc->sc_type) {
   15578 	case WM_T_PCH_LPT:
   15579 	case WM_T_PCH_SPT:
   15580 	case WM_T_PCH_CNP:
   15581 		if (wm_phy_is_accessible_pchlan(sc))
   15582 			break;
   15583 
   15584 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15585 		 * forcing MAC to SMBus mode first.
   15586 		 */
   15587 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15588 		reg |= CTRL_EXT_FORCE_SMBUS;
   15589 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15590 #if 0
   15591 		/* XXX Isn't this required??? */
   15592 		CSR_WRITE_FLUSH(sc);
   15593 #endif
   15594 		/* Wait 50 milliseconds for MAC to finish any retries
   15595 		 * that it might be trying to perform from previous
   15596 		 * attempts to acknowledge any phy read requests.
   15597 		 */
   15598 		delay(50 * 1000);
   15599 		/* FALLTHROUGH */
   15600 	case WM_T_PCH2:
   15601 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15602 			break;
   15603 		/* FALLTHROUGH */
   15604 	case WM_T_PCH:
   15605 		if (sc->sc_type == WM_T_PCH)
   15606 			if ((fwsm & FWSM_FW_VALID) != 0)
   15607 				break;
   15608 
   15609 		if (wm_phy_resetisblocked(sc) == true) {
   15610 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15611 			break;
   15612 		}
   15613 
   15614 		/* Toggle LANPHYPC Value bit */
   15615 		wm_toggle_lanphypc_pch_lpt(sc);
   15616 
   15617 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15618 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15619 				break;
   15620 
   15621 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15622 			 * so ensure that the MAC is also out of SMBus mode
   15623 			 */
   15624 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15625 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15626 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15627 
   15628 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15629 				break;
   15630 			rv = -1;
   15631 		}
   15632 		break;
   15633 	default:
   15634 		break;
   15635 	}
   15636 
   15637 	/* Release semaphore */
   15638 	sc->phy.release(sc);
   15639 
   15640 	if (rv == 0) {
   15641 		/* Check to see if able to reset PHY.  Print error if not */
   15642 		if (wm_phy_resetisblocked(sc)) {
   15643 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15644 			goto out;
   15645 		}
   15646 
   15647 		/* Reset the PHY before any access to it.  Doing so, ensures
   15648 		 * that the PHY is in a known good state before we read/write
   15649 		 * PHY registers.  The generic reset is sufficient here,
   15650 		 * because we haven't determined the PHY type yet.
   15651 		 */
   15652 		if (wm_reset_phy(sc) != 0)
   15653 			goto out;
   15654 
   15655 		/* On a successful reset, possibly need to wait for the PHY
   15656 		 * to quiesce to an accessible state before returning control
   15657 		 * to the calling function.  If the PHY does not quiesce, then
   15658 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   15659 		 *  the PHY is in.
   15660 		 */
   15661 		if (wm_phy_resetisblocked(sc))
   15662 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15663 	}
   15664 
   15665 out:
   15666 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15667 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15668 		delay(10*1000);
   15669 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15670 	}
   15671 
   15672 	return 0;
   15673 }
   15674 
   15675 static void
   15676 wm_init_manageability(struct wm_softc *sc)
   15677 {
   15678 
   15679 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15680 		device_xname(sc->sc_dev), __func__));
   15681 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15682 
   15683 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15684 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15685 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15686 
   15687 		/* Disable hardware interception of ARP */
   15688 		manc &= ~MANC_ARP_EN;
   15689 
   15690 		/* Enable receiving management packets to the host */
   15691 		if (sc->sc_type >= WM_T_82571) {
   15692 			manc |= MANC_EN_MNG2HOST;
   15693 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15694 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15695 		}
   15696 
   15697 		CSR_WRITE(sc, WMREG_MANC, manc);
   15698 	}
   15699 }
   15700 
   15701 static void
   15702 wm_release_manageability(struct wm_softc *sc)
   15703 {
   15704 
   15705 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15706 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15707 
   15708 		manc |= MANC_ARP_EN;
   15709 		if (sc->sc_type >= WM_T_82571)
   15710 			manc &= ~MANC_EN_MNG2HOST;
   15711 
   15712 		CSR_WRITE(sc, WMREG_MANC, manc);
   15713 	}
   15714 }
   15715 
   15716 static void
   15717 wm_get_wakeup(struct wm_softc *sc)
   15718 {
   15719 
   15720 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15721 	switch (sc->sc_type) {
   15722 	case WM_T_82573:
   15723 	case WM_T_82583:
   15724 		sc->sc_flags |= WM_F_HAS_AMT;
   15725 		/* FALLTHROUGH */
   15726 	case WM_T_80003:
   15727 	case WM_T_82575:
   15728 	case WM_T_82576:
   15729 	case WM_T_82580:
   15730 	case WM_T_I350:
   15731 	case WM_T_I354:
   15732 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15733 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15734 		/* FALLTHROUGH */
   15735 	case WM_T_82541:
   15736 	case WM_T_82541_2:
   15737 	case WM_T_82547:
   15738 	case WM_T_82547_2:
   15739 	case WM_T_82571:
   15740 	case WM_T_82572:
   15741 	case WM_T_82574:
   15742 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15743 		break;
   15744 	case WM_T_ICH8:
   15745 	case WM_T_ICH9:
   15746 	case WM_T_ICH10:
   15747 	case WM_T_PCH:
   15748 	case WM_T_PCH2:
   15749 	case WM_T_PCH_LPT:
   15750 	case WM_T_PCH_SPT:
   15751 	case WM_T_PCH_CNP:
   15752 		sc->sc_flags |= WM_F_HAS_AMT;
   15753 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15754 		break;
   15755 	default:
   15756 		break;
   15757 	}
   15758 
   15759 	/* 1: HAS_MANAGE */
   15760 	if (wm_enable_mng_pass_thru(sc) != 0)
   15761 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15762 
   15763 	/*
   15764 	 * Note that the WOL flags is set after the resetting of the eeprom
   15765 	 * stuff
   15766 	 */
   15767 }
   15768 
   15769 /*
   15770  * Unconfigure Ultra Low Power mode.
   15771  * Only for I217 and newer (see below).
   15772  */
   15773 static int
   15774 wm_ulp_disable(struct wm_softc *sc)
   15775 {
   15776 	uint32_t reg;
   15777 	uint16_t phyreg;
   15778 	int i = 0, rv;
   15779 
   15780 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15781 		device_xname(sc->sc_dev), __func__));
   15782 	/* Exclude old devices */
   15783 	if ((sc->sc_type < WM_T_PCH_LPT)
   15784 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15785 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15786 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15787 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15788 		return 0;
   15789 
   15790 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15791 		/* Request ME un-configure ULP mode in the PHY */
   15792 		reg = CSR_READ(sc, WMREG_H2ME);
   15793 		reg &= ~H2ME_ULP;
   15794 		reg |= H2ME_ENFORCE_SETTINGS;
   15795 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15796 
   15797 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15798 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15799 			if (i++ == 30) {
   15800 				device_printf(sc->sc_dev, "%s timed out\n",
   15801 				    __func__);
   15802 				return -1;
   15803 			}
   15804 			delay(10 * 1000);
   15805 		}
   15806 		reg = CSR_READ(sc, WMREG_H2ME);
   15807 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15808 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15809 
   15810 		return 0;
   15811 	}
   15812 
   15813 	/* Acquire semaphore */
   15814 	rv = sc->phy.acquire(sc);
   15815 	if (rv != 0) {
   15816 		DPRINTF(sc, WM_DEBUG_INIT,
   15817 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15818 		return rv;
   15819 	}
   15820 
   15821 	/* Toggle LANPHYPC */
   15822 	wm_toggle_lanphypc_pch_lpt(sc);
   15823 
   15824 	/* Unforce SMBus mode in PHY */
   15825 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15826 	if (rv != 0) {
   15827 		uint32_t reg2;
   15828 
   15829 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15830 		    __func__);
   15831 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15832 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15833 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15834 		delay(50 * 1000);
   15835 
   15836 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15837 		    &phyreg);
   15838 		if (rv != 0)
   15839 			goto release;
   15840 	}
   15841 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15842 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15843 
   15844 	/* Unforce SMBus mode in MAC */
   15845 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15846 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15847 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15848 
   15849 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15850 	if (rv != 0)
   15851 		goto release;
   15852 	phyreg |= HV_PM_CTRL_K1_ENA;
   15853 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15854 
   15855 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15856 	    &phyreg);
   15857 	if (rv != 0)
   15858 		goto release;
   15859 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15860 	    | I218_ULP_CONFIG1_STICKY_ULP
   15861 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15862 	    | I218_ULP_CONFIG1_WOL_HOST
   15863 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15864 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15865 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15866 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15867 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15868 	phyreg |= I218_ULP_CONFIG1_START;
   15869 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15870 
   15871 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15872 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15873 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15874 
   15875 release:
   15876 	/* Release semaphore */
   15877 	sc->phy.release(sc);
   15878 	wm_gmii_reset(sc);
   15879 	delay(50 * 1000);
   15880 
   15881 	return rv;
   15882 }
   15883 
   15884 /* WOL in the newer chipset interfaces (pchlan) */
   15885 static int
   15886 wm_enable_phy_wakeup(struct wm_softc *sc)
   15887 {
   15888 	device_t dev = sc->sc_dev;
   15889 	uint32_t mreg, moff;
   15890 	uint16_t wuce, wuc, wufc, preg;
   15891 	int i, rv;
   15892 
   15893 	KASSERT(sc->sc_type >= WM_T_PCH);
   15894 
   15895 	/* Copy MAC RARs to PHY RARs */
   15896 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15897 
   15898 	/* Activate PHY wakeup */
   15899 	rv = sc->phy.acquire(sc);
   15900 	if (rv != 0) {
   15901 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15902 		    __func__);
   15903 		return rv;
   15904 	}
   15905 
   15906 	/*
   15907 	 * Enable access to PHY wakeup registers.
   15908 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15909 	 */
   15910 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15911 	if (rv != 0) {
   15912 		device_printf(dev,
   15913 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15914 		goto release;
   15915 	}
   15916 
   15917 	/* Copy MAC MTA to PHY MTA */
   15918 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15919 		uint16_t lo, hi;
   15920 
   15921 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15922 		lo = (uint16_t)(mreg & 0xffff);
   15923 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15924 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15925 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15926 	}
   15927 
   15928 	/* Configure PHY Rx Control register */
   15929 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15930 	mreg = CSR_READ(sc, WMREG_RCTL);
   15931 	if (mreg & RCTL_UPE)
   15932 		preg |= BM_RCTL_UPE;
   15933 	if (mreg & RCTL_MPE)
   15934 		preg |= BM_RCTL_MPE;
   15935 	preg &= ~(BM_RCTL_MO_MASK);
   15936 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15937 	if (moff != 0)
   15938 		preg |= moff << BM_RCTL_MO_SHIFT;
   15939 	if (mreg & RCTL_BAM)
   15940 		preg |= BM_RCTL_BAM;
   15941 	if (mreg & RCTL_PMCF)
   15942 		preg |= BM_RCTL_PMCF;
   15943 	mreg = CSR_READ(sc, WMREG_CTRL);
   15944 	if (mreg & CTRL_RFCE)
   15945 		preg |= BM_RCTL_RFCE;
   15946 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15947 
   15948 	wuc = WUC_APME | WUC_PME_EN;
   15949 	wufc = WUFC_MAG;
   15950 	/* Enable PHY wakeup in MAC register */
   15951 	CSR_WRITE(sc, WMREG_WUC,
   15952 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15953 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15954 
   15955 	/* Configure and enable PHY wakeup in PHY registers */
   15956 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15957 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15958 
   15959 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15960 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15961 
   15962 release:
   15963 	sc->phy.release(sc);
   15964 
   15965 	return 0;
   15966 }
   15967 
   15968 /* Power down workaround on D3 */
   15969 static void
   15970 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15971 {
   15972 	uint32_t reg;
   15973 	uint16_t phyreg;
   15974 	int i;
   15975 
   15976 	for (i = 0; i < 2; i++) {
   15977 		/* Disable link */
   15978 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15979 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15980 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15981 
   15982 		/*
   15983 		 * Call gig speed drop workaround on Gig disable before
   15984 		 * accessing any PHY registers
   15985 		 */
   15986 		if (sc->sc_type == WM_T_ICH8)
   15987 			wm_gig_downshift_workaround_ich8lan(sc);
   15988 
   15989 		/* Write VR power-down enable */
   15990 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15991 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15992 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15993 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15994 
   15995 		/* Read it back and test */
   15996 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15997 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15998 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15999 			break;
   16000 
   16001 		/* Issue PHY reset and repeat at most one more time */
   16002 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16003 	}
   16004 }
   16005 
   16006 /*
   16007  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16008  *  @sc: pointer to the HW structure
   16009  *
   16010  *  During S0 to Sx transition, it is possible the link remains at gig
   16011  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16012  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16013  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16014  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16015  *  needs to be written.
   16016  *  Parts that support (and are linked to a partner which support) EEE in
   16017  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16018  *  than 10Mbps w/o EEE.
   16019  */
   16020 static void
   16021 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16022 {
   16023 	device_t dev = sc->sc_dev;
   16024 	struct ethercom *ec = &sc->sc_ethercom;
   16025 	uint32_t phy_ctrl;
   16026 	int rv;
   16027 
   16028 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16029 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16030 
   16031 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16032 
   16033 	if (sc->sc_phytype == WMPHY_I217) {
   16034 		uint16_t devid = sc->sc_pcidevid;
   16035 
   16036 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16037 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16038 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16039 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16040 		    (sc->sc_type >= WM_T_PCH_SPT))
   16041 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16042 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16043 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16044 
   16045 		if (sc->phy.acquire(sc) != 0)
   16046 			goto out;
   16047 
   16048 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16049 			uint16_t eee_advert;
   16050 
   16051 			rv = wm_read_emi_reg_locked(dev,
   16052 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16053 			if (rv)
   16054 				goto release;
   16055 
   16056 			/*
   16057 			 * Disable LPLU if both link partners support 100BaseT
   16058 			 * EEE and 100Full is advertised on both ends of the
   16059 			 * link, and enable Auto Enable LPI since there will
   16060 			 * be no driver to enable LPI while in Sx.
   16061 			 */
   16062 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16063 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16064 				uint16_t anar, phy_reg;
   16065 
   16066 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16067 				    &anar);
   16068 				if (anar & ANAR_TX_FD) {
   16069 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16070 					    PHY_CTRL_NOND0A_LPLU);
   16071 
   16072 					/* Set Auto Enable LPI after link up */
   16073 					sc->phy.readreg_locked(dev, 2,
   16074 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16075 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16076 					sc->phy.writereg_locked(dev, 2,
   16077 					    I217_LPI_GPIO_CTRL, phy_reg);
   16078 				}
   16079 			}
   16080 		}
   16081 
   16082 		/*
   16083 		 * For i217 Intel Rapid Start Technology support,
   16084 		 * when the system is going into Sx and no manageability engine
   16085 		 * is present, the driver must configure proxy to reset only on
   16086 		 * power good.	LPI (Low Power Idle) state must also reset only
   16087 		 * on power good, as well as the MTA (Multicast table array).
   16088 		 * The SMBus release must also be disabled on LCD reset.
   16089 		 */
   16090 
   16091 		/*
   16092 		 * Enable MTA to reset for Intel Rapid Start Technology
   16093 		 * Support
   16094 		 */
   16095 
   16096 release:
   16097 		sc->phy.release(sc);
   16098 	}
   16099 out:
   16100 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16101 
   16102 	if (sc->sc_type == WM_T_ICH8)
   16103 		wm_gig_downshift_workaround_ich8lan(sc);
   16104 
   16105 	if (sc->sc_type >= WM_T_PCH) {
   16106 		wm_oem_bits_config_ich8lan(sc, false);
   16107 
   16108 		/* Reset PHY to activate OEM bits on 82577/8 */
   16109 		if (sc->sc_type == WM_T_PCH)
   16110 			wm_reset_phy(sc);
   16111 
   16112 		if (sc->phy.acquire(sc) != 0)
   16113 			return;
   16114 		wm_write_smbus_addr(sc);
   16115 		sc->phy.release(sc);
   16116 	}
   16117 }
   16118 
   16119 /*
   16120  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16121  *  @sc: pointer to the HW structure
   16122  *
   16123  *  During Sx to S0 transitions on non-managed devices or managed devices
   16124  *  on which PHY resets are not blocked, if the PHY registers cannot be
   16125  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   16126  *  the PHY.
   16127  *  On i217, setup Intel Rapid Start Technology.
   16128  */
   16129 static int
   16130 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16131 {
   16132 	device_t dev = sc->sc_dev;
   16133 	int rv;
   16134 
   16135 	if (sc->sc_type < WM_T_PCH2)
   16136 		return 0;
   16137 
   16138 	rv = wm_init_phy_workarounds_pchlan(sc);
   16139 	if (rv != 0)
   16140 		return rv;
   16141 
   16142 	/* For i217 Intel Rapid Start Technology support when the system
   16143 	 * is transitioning from Sx and no manageability engine is present
   16144 	 * configure SMBus to restore on reset, disable proxy, and enable
   16145 	 * the reset on MTA (Multicast table array).
   16146 	 */
   16147 	if (sc->sc_phytype == WMPHY_I217) {
   16148 		uint16_t phy_reg;
   16149 
   16150 		rv = sc->phy.acquire(sc);
   16151 		if (rv != 0)
   16152 			return rv;
   16153 
   16154 		/* Clear Auto Enable LPI after link up */
   16155 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16156 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16157 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16158 
   16159 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16160 			/* Restore clear on SMB if no manageability engine
   16161 			 * is present
   16162 			 */
   16163 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16164 			    &phy_reg);
   16165 			if (rv != 0)
   16166 				goto release;
   16167 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16168 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16169 
   16170 			/* Disable Proxy */
   16171 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16172 		}
   16173 		/* Enable reset on MTA */
   16174 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16175 		if (rv != 0)
   16176 			goto release;
   16177 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16178 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16179 
   16180 release:
   16181 		sc->phy.release(sc);
   16182 		return rv;
   16183 	}
   16184 
   16185 	return 0;
   16186 }
   16187 
   16188 static void
   16189 wm_enable_wakeup(struct wm_softc *sc)
   16190 {
   16191 	uint32_t reg, pmreg;
   16192 	pcireg_t pmode;
   16193 	int rv = 0;
   16194 
   16195 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16196 		device_xname(sc->sc_dev), __func__));
   16197 
   16198 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16199 	    &pmreg, NULL) == 0)
   16200 		return;
   16201 
   16202 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16203 		goto pme;
   16204 
   16205 	/* Advertise the wakeup capability */
   16206 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16207 	    | CTRL_SWDPIN(3));
   16208 
   16209 	/* Keep the laser running on fiber adapters */
   16210 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16211 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16212 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16213 		reg |= CTRL_EXT_SWDPIN(3);
   16214 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16215 	}
   16216 
   16217 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16218 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16219 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16220 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16221 		wm_suspend_workarounds_ich8lan(sc);
   16222 
   16223 #if 0	/* For the multicast packet */
   16224 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16225 	reg |= WUFC_MC;
   16226 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16227 #endif
   16228 
   16229 	if (sc->sc_type >= WM_T_PCH) {
   16230 		rv = wm_enable_phy_wakeup(sc);
   16231 		if (rv != 0)
   16232 			goto pme;
   16233 	} else {
   16234 		/* Enable wakeup by the MAC */
   16235 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16236 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16237 	}
   16238 
   16239 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16240 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16241 		|| (sc->sc_type == WM_T_PCH2))
   16242 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16243 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16244 
   16245 pme:
   16246 	/* Request PME */
   16247 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16248 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16249 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16250 		/* For WOL */
   16251 		pmode |= PCI_PMCSR_PME_EN;
   16252 	} else {
   16253 		/* Disable WOL */
   16254 		pmode &= ~PCI_PMCSR_PME_EN;
   16255 	}
   16256 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16257 }
   16258 
   16259 /* Disable ASPM L0s and/or L1 for workaround */
   16260 static void
   16261 wm_disable_aspm(struct wm_softc *sc)
   16262 {
   16263 	pcireg_t reg, mask = 0;
   16264 	unsigned const char *str = "";
   16265 
   16266 	/*
   16267 	 *  Only for PCIe device which has PCIe capability in the PCI config
   16268 	 * space.
   16269 	 */
   16270 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16271 		return;
   16272 
   16273 	switch (sc->sc_type) {
   16274 	case WM_T_82571:
   16275 	case WM_T_82572:
   16276 		/*
   16277 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16278 		 * State Power management L1 State (ASPM L1).
   16279 		 */
   16280 		mask = PCIE_LCSR_ASPM_L1;
   16281 		str = "L1 is";
   16282 		break;
   16283 	case WM_T_82573:
   16284 	case WM_T_82574:
   16285 	case WM_T_82583:
   16286 		/*
   16287 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16288 		 *
   16289 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
   16290 		 * some chipset.  The document of 82574 and 82583 says that
   16291 		 * disabling L0s with some specific chipset is sufficient,
   16292 		 * but we follow as of the Intel em driver does.
   16293 		 *
   16294 		 * References:
   16295 		 * Errata 8 of the Specification Update of i82573.
   16296 		 * Errata 20 of the Specification Update of i82574.
   16297 		 * Errata 9 of the Specification Update of i82583.
   16298 		 */
   16299 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16300 		str = "L0s and L1 are";
   16301 		break;
   16302 	default:
   16303 		return;
   16304 	}
   16305 
   16306 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16307 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16308 	reg &= ~mask;
   16309 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16310 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16311 
   16312 	/* Print only in wm_attach() */
   16313 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16314 		aprint_verbose_dev(sc->sc_dev,
   16315 		    "ASPM %s disabled to workaround the errata.\n", str);
   16316 }
   16317 
   16318 /* LPLU */
   16319 
   16320 static void
   16321 wm_lplu_d0_disable(struct wm_softc *sc)
   16322 {
   16323 	struct mii_data *mii = &sc->sc_mii;
   16324 	uint32_t reg;
   16325 	uint16_t phyval;
   16326 
   16327 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16328 		device_xname(sc->sc_dev), __func__));
   16329 
   16330 	if (sc->sc_phytype == WMPHY_IFE)
   16331 		return;
   16332 
   16333 	switch (sc->sc_type) {
   16334 	case WM_T_82571:
   16335 	case WM_T_82572:
   16336 	case WM_T_82573:
   16337 	case WM_T_82575:
   16338 	case WM_T_82576:
   16339 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16340 		phyval &= ~PMR_D0_LPLU;
   16341 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16342 		break;
   16343 	case WM_T_82580:
   16344 	case WM_T_I350:
   16345 	case WM_T_I210:
   16346 	case WM_T_I211:
   16347 		reg = CSR_READ(sc, WMREG_PHPM);
   16348 		reg &= ~PHPM_D0A_LPLU;
   16349 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16350 		break;
   16351 	case WM_T_82574:
   16352 	case WM_T_82583:
   16353 	case WM_T_ICH8:
   16354 	case WM_T_ICH9:
   16355 	case WM_T_ICH10:
   16356 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16357 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16358 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16359 		CSR_WRITE_FLUSH(sc);
   16360 		break;
   16361 	case WM_T_PCH:
   16362 	case WM_T_PCH2:
   16363 	case WM_T_PCH_LPT:
   16364 	case WM_T_PCH_SPT:
   16365 	case WM_T_PCH_CNP:
   16366 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16367 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16368 		if (wm_phy_resetisblocked(sc) == false)
   16369 			phyval |= HV_OEM_BITS_ANEGNOW;
   16370 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16371 		break;
   16372 	default:
   16373 		break;
   16374 	}
   16375 }
   16376 
   16377 /* EEE */
   16378 
   16379 static int
   16380 wm_set_eee_i350(struct wm_softc *sc)
   16381 {
   16382 	struct ethercom *ec = &sc->sc_ethercom;
   16383 	uint32_t ipcnfg, eeer;
   16384 	uint32_t ipcnfg_mask
   16385 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16386 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16387 
   16388 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16389 
   16390 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16391 	eeer = CSR_READ(sc, WMREG_EEER);
   16392 
   16393 	/* Enable or disable per user setting */
   16394 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16395 		ipcnfg |= ipcnfg_mask;
   16396 		eeer |= eeer_mask;
   16397 	} else {
   16398 		ipcnfg &= ~ipcnfg_mask;
   16399 		eeer &= ~eeer_mask;
   16400 	}
   16401 
   16402 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16403 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16404 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16405 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16406 
   16407 	return 0;
   16408 }
   16409 
   16410 static int
   16411 wm_set_eee_pchlan(struct wm_softc *sc)
   16412 {
   16413 	device_t dev = sc->sc_dev;
   16414 	struct ethercom *ec = &sc->sc_ethercom;
   16415 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16416 	int rv;
   16417 
   16418 	switch (sc->sc_phytype) {
   16419 	case WMPHY_82579:
   16420 		lpa = I82579_EEE_LP_ABILITY;
   16421 		pcs_status = I82579_EEE_PCS_STATUS;
   16422 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16423 		break;
   16424 	case WMPHY_I217:
   16425 		lpa = I217_EEE_LP_ABILITY;
   16426 		pcs_status = I217_EEE_PCS_STATUS;
   16427 		adv_addr = I217_EEE_ADVERTISEMENT;
   16428 		break;
   16429 	default:
   16430 		return 0;
   16431 	}
   16432 
   16433 	rv = sc->phy.acquire(sc);
   16434 	if (rv != 0) {
   16435 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16436 		return rv;
   16437 	}
   16438 
   16439 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16440 	if (rv != 0)
   16441 		goto release;
   16442 
   16443 	/* Clear bits that enable EEE in various speeds */
   16444 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16445 
   16446 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16447 		/* Save off link partner's EEE ability */
   16448 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16449 		if (rv != 0)
   16450 			goto release;
   16451 
   16452 		/* Read EEE advertisement */
   16453 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16454 			goto release;
   16455 
   16456 		/*
   16457 		 * Enable EEE only for speeds in which the link partner is
   16458 		 * EEE capable and for which we advertise EEE.
   16459 		 */
   16460 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16461 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16462 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16463 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16464 			if ((data & ANLPAR_TX_FD) != 0)
   16465 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16466 			else {
   16467 				/*
   16468 				 * EEE is not supported in 100Half, so ignore
   16469 				 * partner's EEE in 100 ability if full-duplex
   16470 				 * is not advertised.
   16471 				 */
   16472 				sc->eee_lp_ability
   16473 				    &= ~AN_EEEADVERT_100_TX;
   16474 			}
   16475 		}
   16476 	}
   16477 
   16478 	if (sc->sc_phytype == WMPHY_82579) {
   16479 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16480 		if (rv != 0)
   16481 			goto release;
   16482 
   16483 		data &= ~I82579_LPI_PLL_SHUT_100;
   16484 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16485 	}
   16486 
   16487 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16488 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16489 		goto release;
   16490 
   16491 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16492 release:
   16493 	sc->phy.release(sc);
   16494 
   16495 	return rv;
   16496 }
   16497 
   16498 static int
   16499 wm_set_eee(struct wm_softc *sc)
   16500 {
   16501 	struct ethercom *ec = &sc->sc_ethercom;
   16502 
   16503 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16504 		return 0;
   16505 
   16506 	if (sc->sc_type == WM_T_I354) {
   16507 		/* I354 uses an external PHY */
   16508 		return 0; /* not yet */
   16509 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16510 		return wm_set_eee_i350(sc);
   16511 	else if (sc->sc_type >= WM_T_PCH2)
   16512 		return wm_set_eee_pchlan(sc);
   16513 
   16514 	return 0;
   16515 }
   16516 
   16517 /*
   16518  * Workarounds (mainly PHY related).
   16519  * Basically, PHY's workarounds are in the PHY drivers.
   16520  */
   16521 
   16522 /* Workaround for 82566 Kumeran PCS lock loss */
   16523 static int
   16524 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16525 {
   16526 	struct mii_data *mii = &sc->sc_mii;
   16527 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16528 	int i, reg, rv;
   16529 	uint16_t phyreg;
   16530 
   16531 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16532 		device_xname(sc->sc_dev), __func__));
   16533 
   16534 	/* If the link is not up, do nothing */
   16535 	if ((status & STATUS_LU) == 0)
   16536 		return 0;
   16537 
   16538 	/* Nothing to do if the link is other than 1Gbps */
   16539 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16540 		return 0;
   16541 
   16542 	for (i = 0; i < 10; i++) {
   16543 		/* read twice */
   16544 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16545 		if (rv != 0)
   16546 			return rv;
   16547 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16548 		if (rv != 0)
   16549 			return rv;
   16550 
   16551 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16552 			goto out;	/* GOOD! */
   16553 
   16554 		/* Reset the PHY */
   16555 		wm_reset_phy(sc);
   16556 		delay(5*1000);
   16557 	}
   16558 
   16559 	/* Disable GigE link negotiation */
   16560 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16561 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16562 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16563 
   16564 	/*
   16565 	 * Call gig speed drop workaround on Gig disable before accessing
   16566 	 * any PHY registers.
   16567 	 */
   16568 	wm_gig_downshift_workaround_ich8lan(sc);
   16569 
   16570 out:
   16571 	return 0;
   16572 }
   16573 
   16574 /*
   16575  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16576  *  @sc: pointer to the HW structure
   16577  *
   16578  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   16579  *  LPLU, Gig disable, MDIC PHY reset):
   16580  *    1) Set Kumeran Near-end loopback
   16581  *    2) Clear Kumeran Near-end loopback
   16582  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16583  */
   16584 static void
   16585 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16586 {
   16587 	uint16_t kmreg;
   16588 
   16589 	/* Only for igp3 */
   16590 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16591 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16592 			return;
   16593 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16594 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16595 			return;
   16596 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16597 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16598 	}
   16599 }
   16600 
   16601 /*
   16602  * Workaround for pch's PHYs
   16603  * XXX should be moved to new PHY driver?
   16604  */
   16605 static int
   16606 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16607 {
   16608 	device_t dev = sc->sc_dev;
   16609 	struct mii_data *mii = &sc->sc_mii;
   16610 	struct mii_softc *child;
   16611 	uint16_t phy_data, phyrev = 0;
   16612 	int phytype = sc->sc_phytype;
   16613 	int rv;
   16614 
   16615 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16616 		device_xname(dev), __func__));
   16617 	KASSERT(sc->sc_type == WM_T_PCH);
   16618 
   16619 	/* Set MDIO slow mode before any other MDIO access */
   16620 	if (phytype == WMPHY_82577)
   16621 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16622 			return rv;
   16623 
   16624 	child = LIST_FIRST(&mii->mii_phys);
   16625 	if (child != NULL)
   16626 		phyrev = child->mii_mpd_rev;
   16627 
   16628 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   16629 	if ((child != NULL) &&
   16630 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16631 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16632 		/* Disable generation of early preamble (0x4431) */
   16633 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16634 		    &phy_data);
   16635 		if (rv != 0)
   16636 			return rv;
   16637 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16638 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16639 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16640 		    phy_data);
   16641 		if (rv != 0)
   16642 			return rv;
   16643 
   16644 		/* Preamble tuning for SSC */
   16645 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16646 		if (rv != 0)
   16647 			return rv;
   16648 	}
   16649 
   16650 	/* 82578 */
   16651 	if (phytype == WMPHY_82578) {
   16652 		/*
   16653 		 * Return registers to default by doing a soft reset then
   16654 		 * writing 0x3140 to the control register
   16655 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16656 		 */
   16657 		if ((child != NULL) && (phyrev < 2)) {
   16658 			PHY_RESET(child);
   16659 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16660 			if (rv != 0)
   16661 				return rv;
   16662 		}
   16663 	}
   16664 
   16665 	/* Select page 0 */
   16666 	if ((rv = sc->phy.acquire(sc)) != 0)
   16667 		return rv;
   16668 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16669 	sc->phy.release(sc);
   16670 	if (rv != 0)
   16671 		return rv;
   16672 
   16673 	/*
   16674 	 * Configure the K1 Si workaround during phy reset assuming there is
   16675 	 * link so that it disables K1 if link is in 1Gbps.
   16676 	 */
   16677 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16678 		return rv;
   16679 
   16680 	/* Workaround for link disconnects on a busy hub in half duplex */
   16681 	rv = sc->phy.acquire(sc);
   16682 	if (rv)
   16683 		return rv;
   16684 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16685 	if (rv)
   16686 		goto release;
   16687 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16688 	    phy_data & 0x00ff);
   16689 	if (rv)
   16690 		goto release;
   16691 
   16692 	/* Set MSE higher to enable link to stay up when noise is high */
   16693 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16694 release:
   16695 	sc->phy.release(sc);
   16696 
   16697 	return rv;
   16698 }
   16699 
   16700 /*
   16701  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16702  *  @sc:   pointer to the HW structure
   16703  */
   16704 static void
   16705 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16706 {
   16707 
   16708 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16709 		device_xname(sc->sc_dev), __func__));
   16710 
   16711 	if (sc->phy.acquire(sc) != 0)
   16712 		return;
   16713 
   16714 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16715 
   16716 	sc->phy.release(sc);
   16717 }
   16718 
   16719 static void
   16720 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16721 {
   16722 	device_t dev = sc->sc_dev;
   16723 	uint32_t mac_reg;
   16724 	uint16_t i, wuce;
   16725 	int count;
   16726 
   16727 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16728 		device_xname(dev), __func__));
   16729 
   16730 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16731 		return;
   16732 
   16733 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16734 	count = wm_rar_count(sc);
   16735 	for (i = 0; i < count; i++) {
   16736 		uint16_t lo, hi;
   16737 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16738 		lo = (uint16_t)(mac_reg & 0xffff);
   16739 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16740 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16741 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16742 
   16743 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16744 		lo = (uint16_t)(mac_reg & 0xffff);
   16745 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16746 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16747 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16748 	}
   16749 
   16750 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16751 }
   16752 
   16753 /*
   16754  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16755  *  with 82579 PHY
   16756  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16757  */
   16758 static int
   16759 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16760 {
   16761 	device_t dev = sc->sc_dev;
   16762 	int rar_count;
   16763 	int rv;
   16764 	uint32_t mac_reg;
   16765 	uint16_t dft_ctrl, data;
   16766 	uint16_t i;
   16767 
   16768 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16769 		device_xname(dev), __func__));
   16770 
   16771 	if (sc->sc_type < WM_T_PCH2)
   16772 		return 0;
   16773 
   16774 	/* Acquire PHY semaphore */
   16775 	rv = sc->phy.acquire(sc);
   16776 	if (rv != 0)
   16777 		return rv;
   16778 
   16779 	/* Disable Rx path while enabling/disabling workaround */
   16780 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16781 	if (rv != 0)
   16782 		goto out;
   16783 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16784 	    dft_ctrl | (1 << 14));
   16785 	if (rv != 0)
   16786 		goto out;
   16787 
   16788 	if (enable) {
   16789 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16790 		 * SHRAL/H) and initial CRC values to the MAC
   16791 		 */
   16792 		rar_count = wm_rar_count(sc);
   16793 		for (i = 0; i < rar_count; i++) {
   16794 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16795 			uint32_t addr_high, addr_low;
   16796 
   16797 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16798 			if (!(addr_high & RAL_AV))
   16799 				continue;
   16800 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16801 			mac_addr[0] = (addr_low & 0xFF);
   16802 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16803 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16804 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16805 			mac_addr[4] = (addr_high & 0xFF);
   16806 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16807 
   16808 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16809 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16810 		}
   16811 
   16812 		/* Write Rx addresses to the PHY */
   16813 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16814 	}
   16815 
   16816 	/*
   16817 	 * If enable ==
   16818 	 *	true: Enable jumbo frame workaround in the MAC.
   16819 	 *	false: Write MAC register values back to h/w defaults.
   16820 	 */
   16821 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16822 	if (enable) {
   16823 		mac_reg &= ~(1 << 14);
   16824 		mac_reg |= (7 << 15);
   16825 	} else
   16826 		mac_reg &= ~(0xf << 14);
   16827 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16828 
   16829 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16830 	if (enable) {
   16831 		mac_reg |= RCTL_SECRC;
   16832 		sc->sc_rctl |= RCTL_SECRC;
   16833 		sc->sc_flags |= WM_F_CRC_STRIP;
   16834 	} else {
   16835 		mac_reg &= ~RCTL_SECRC;
   16836 		sc->sc_rctl &= ~RCTL_SECRC;
   16837 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16838 	}
   16839 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16840 
   16841 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16842 	if (rv != 0)
   16843 		goto out;
   16844 	if (enable)
   16845 		data |= 1 << 0;
   16846 	else
   16847 		data &= ~(1 << 0);
   16848 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16849 	if (rv != 0)
   16850 		goto out;
   16851 
   16852 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16853 	if (rv != 0)
   16854 		goto out;
   16855 	/*
   16856 	 * XXX FreeBSD and Linux do the same thing that they set the same value
   16857 	 * on both the enable case and the disable case. Is it correct?
   16858 	 */
   16859 	data &= ~(0xf << 8);
   16860 	data |= (0xb << 8);
   16861 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16862 	if (rv != 0)
   16863 		goto out;
   16864 
   16865 	/*
   16866 	 * If enable ==
   16867 	 *	true: Enable jumbo frame workaround in the PHY.
   16868 	 *	false: Write PHY register values back to h/w defaults.
   16869 	 */
   16870 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16871 	if (rv != 0)
   16872 		goto out;
   16873 	data &= ~(0x7F << 5);
   16874 	if (enable)
   16875 		data |= (0x37 << 5);
   16876 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16877 	if (rv != 0)
   16878 		goto out;
   16879 
   16880 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16881 	if (rv != 0)
   16882 		goto out;
   16883 	if (enable)
   16884 		data &= ~(1 << 13);
   16885 	else
   16886 		data |= (1 << 13);
   16887 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16888 	if (rv != 0)
   16889 		goto out;
   16890 
   16891 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16892 	if (rv != 0)
   16893 		goto out;
   16894 	data &= ~(0x3FF << 2);
   16895 	if (enable)
   16896 		data |= (I82579_TX_PTR_GAP << 2);
   16897 	else
   16898 		data |= (0x8 << 2);
   16899 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16900 	if (rv != 0)
   16901 		goto out;
   16902 
   16903 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16904 	    enable ? 0xf100 : 0x7e00);
   16905 	if (rv != 0)
   16906 		goto out;
   16907 
   16908 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16909 	if (rv != 0)
   16910 		goto out;
   16911 	if (enable)
   16912 		data |= 1 << 10;
   16913 	else
   16914 		data &= ~(1 << 10);
   16915 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16916 	if (rv != 0)
   16917 		goto out;
   16918 
   16919 	/* Re-enable Rx path after enabling/disabling workaround */
   16920 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16921 	    dft_ctrl & ~(1 << 14));
   16922 
   16923 out:
   16924 	sc->phy.release(sc);
   16925 
   16926 	return rv;
   16927 }
   16928 
   16929 /*
   16930  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16931  *  done after every PHY reset.
   16932  */
   16933 static int
   16934 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16935 {
   16936 	device_t dev = sc->sc_dev;
   16937 	int rv;
   16938 
   16939 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16940 		device_xname(dev), __func__));
   16941 	KASSERT(sc->sc_type == WM_T_PCH2);
   16942 
   16943 	/* Set MDIO slow mode before any other MDIO access */
   16944 	rv = wm_set_mdio_slow_mode_hv(sc);
   16945 	if (rv != 0)
   16946 		return rv;
   16947 
   16948 	rv = sc->phy.acquire(sc);
   16949 	if (rv != 0)
   16950 		return rv;
   16951 	/* Set MSE higher to enable link to stay up when noise is high */
   16952 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16953 	if (rv != 0)
   16954 		goto release;
   16955 	/* Drop link after 5 times MSE threshold was reached */
   16956 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16957 release:
   16958 	sc->phy.release(sc);
   16959 
   16960 	return rv;
   16961 }
   16962 
   16963 /**
   16964  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16965  *  @link: link up bool flag
   16966  *
   16967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16968  *  preventing further DMA write requests.  Workaround the issue by disabling
   16969  *  the de-assertion of the clock request when in 1Gpbs mode.
   16970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16971  *  speeds in order to avoid Tx hangs.
   16972  **/
   16973 static int
   16974 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16975 {
   16976 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16977 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16978 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16979 	uint16_t phyreg;
   16980 
   16981 	if (link && (speed == STATUS_SPEED_1000)) {
   16982 		int rv;
   16983 
   16984 		rv = sc->phy.acquire(sc);
   16985 		if (rv != 0)
   16986 			return rv;
   16987 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16988 		    &phyreg);
   16989 		if (rv != 0)
   16990 			goto release;
   16991 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16992 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16993 		if (rv != 0)
   16994 			goto release;
   16995 		delay(20);
   16996 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16997 
   16998 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16999 		    &phyreg);
   17000 release:
   17001 		sc->phy.release(sc);
   17002 		return rv;
   17003 	}
   17004 
   17005 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17006 
   17007 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17008 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17009 	    || !link
   17010 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17011 		goto update_fextnvm6;
   17012 
   17013 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17014 
   17015 	/* Clear link status transmit timeout */
   17016 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17017 	if (speed == STATUS_SPEED_100) {
   17018 		/* Set inband Tx timeout to 5x10us for 100Half */
   17019 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17020 
   17021 		/* Do not extend the K1 entry latency for 100Half */
   17022 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17023 	} else {
   17024 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17025 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17026 
   17027 		/* Extend the K1 entry latency for 10 Mbps */
   17028 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17029 	}
   17030 
   17031 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17032 
   17033 update_fextnvm6:
   17034 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17035 	return 0;
   17036 }
   17037 
   17038 /*
   17039  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17040  *  @sc:   pointer to the HW structure
   17041  *  @link: link up bool flag
   17042  *
   17043  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   17044  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   17045  *  If link is down, the function will restore the default K1 setting located
   17046  *  in the NVM.
   17047  */
   17048 static int
   17049 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17050 {
   17051 	int k1_enable = sc->sc_nvm_k1_enabled;
   17052 	int rv;
   17053 
   17054 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17055 		device_xname(sc->sc_dev), __func__));
   17056 
   17057 	rv = sc->phy.acquire(sc);
   17058 	if (rv != 0)
   17059 		return rv;
   17060 
   17061 	if (link) {
   17062 		k1_enable = 0;
   17063 
   17064 		/* Link stall fix for link up */
   17065 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17066 		    0x0100);
   17067 	} else {
   17068 		/* Link stall fix for link down */
   17069 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17070 		    0x4100);
   17071 	}
   17072 
   17073 	wm_configure_k1_ich8lan(sc, k1_enable);
   17074 	sc->phy.release(sc);
   17075 
   17076 	return 0;
   17077 }
   17078 
   17079 /*
   17080  *  wm_k1_workaround_lv - K1 Si workaround
   17081  *  @sc:   pointer to the HW structure
   17082  *
   17083  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17084  *  Disable K1 for 1000 and 100 speeds
   17085  */
   17086 static int
   17087 wm_k1_workaround_lv(struct wm_softc *sc)
   17088 {
   17089 	uint32_t reg;
   17090 	uint16_t phyreg;
   17091 	int rv;
   17092 
   17093 	if (sc->sc_type != WM_T_PCH2)
   17094 		return 0;
   17095 
   17096 	/* Set K1 beacon duration based on 10Mbps speed */
   17097 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17098 	if (rv != 0)
   17099 		return rv;
   17100 
   17101 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17102 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17103 		if (phyreg &
   17104 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   17105 			/* LV 1G/100 Packet drop issue wa  */
   17106 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17107 			    &phyreg);
   17108 			if (rv != 0)
   17109 				return rv;
   17110 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17111 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17112 			    phyreg);
   17113 			if (rv != 0)
   17114 				return rv;
   17115 		} else {
   17116 			/* For 10Mbps */
   17117 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17118 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17119 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17120 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17121 		}
   17122 	}
   17123 
   17124 	return 0;
   17125 }
   17126 
   17127 /*
   17128  *  wm_link_stall_workaround_hv - Si workaround
   17129  *  @sc: pointer to the HW structure
   17130  *
   17131  *  This function works around a Si bug where the link partner can get
   17132  *  a link up indication before the PHY does. If small packets are sent
   17133  *  by the link partner they can be placed in the packet buffer without
   17134  *  being properly accounted for by the PHY and will stall preventing
   17135  *  further packets from being received.  The workaround is to clear the
   17136  *  packet buffer after the PHY detects link up.
   17137  */
   17138 static int
   17139 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17140 {
   17141 	uint16_t phyreg;
   17142 
   17143 	if (sc->sc_phytype != WMPHY_82578)
   17144 		return 0;
   17145 
   17146 	/* Do not apply workaround if in PHY loopback bit 14 set */
   17147 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17148 	if ((phyreg & BMCR_LOOP) != 0)
   17149 		return 0;
   17150 
   17151 	/* Check if link is up and at 1Gbps */
   17152 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17153 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17154 	    | BM_CS_STATUS_SPEED_MASK;
   17155 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17156 		| BM_CS_STATUS_SPEED_1000))
   17157 		return 0;
   17158 
   17159 	delay(200 * 1000);	/* XXX too big */
   17160 
   17161 	/* Flush the packets in the fifo buffer */
   17162 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17163 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17164 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17165 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17166 
   17167 	return 0;
   17168 }
   17169 
   17170 static int
   17171 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17172 {
   17173 	int rv;
   17174 
   17175 	rv = sc->phy.acquire(sc);
   17176 	if (rv != 0) {
   17177 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17178 		    __func__);
   17179 		return rv;
   17180 	}
   17181 
   17182 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17183 
   17184 	sc->phy.release(sc);
   17185 
   17186 	return rv;
   17187 }
   17188 
   17189 static int
   17190 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17191 {
   17192 	int rv;
   17193 	uint16_t reg;
   17194 
   17195 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17196 	if (rv != 0)
   17197 		return rv;
   17198 
   17199 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17200 	    reg | HV_KMRN_MDIO_SLOW);
   17201 }
   17202 
   17203 /*
   17204  *  wm_configure_k1_ich8lan - Configure K1 power state
   17205  *  @sc: pointer to the HW structure
   17206  *  @enable: K1 state to configure
   17207  *
   17208  *  Configure the K1 power state based on the provided parameter.
   17209  *  Assumes semaphore already acquired.
   17210  */
   17211 static void
   17212 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17213 {
   17214 	uint32_t ctrl, ctrl_ext, tmp;
   17215 	uint16_t kmreg;
   17216 	int rv;
   17217 
   17218 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17219 
   17220 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17221 	if (rv != 0)
   17222 		return;
   17223 
   17224 	if (k1_enable)
   17225 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17226 	else
   17227 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17228 
   17229 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17230 	if (rv != 0)
   17231 		return;
   17232 
   17233 	delay(20);
   17234 
   17235 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17236 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17237 
   17238 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17239 	tmp |= CTRL_FRCSPD;
   17240 
   17241 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17242 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17243 	CSR_WRITE_FLUSH(sc);
   17244 	delay(20);
   17245 
   17246 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17247 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17248 	CSR_WRITE_FLUSH(sc);
   17249 	delay(20);
   17250 
   17251 	return;
   17252 }
   17253 
   17254 /* special case - for 82575 - need to do manual init ... */
   17255 static void
   17256 wm_reset_init_script_82575(struct wm_softc *sc)
   17257 {
   17258 	/*
   17259 	 * Remark: this is untested code - we have no board without EEPROM
   17260 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   17261 	 */
   17262 
   17263 	/* SerDes configuration via SERDESCTRL */
   17264 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17265 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17266 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17267 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17268 
   17269 	/* CCM configuration via CCMCTL register */
   17270 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17271 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17272 
   17273 	/* PCIe lanes configuration */
   17274 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17275 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17276 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17277 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17278 
   17279 	/* PCIe PLL Configuration */
   17280 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17281 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17282 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17283 }
   17284 
   17285 static void
   17286 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17287 {
   17288 	uint32_t reg;
   17289 	uint16_t nvmword;
   17290 	int rv;
   17291 
   17292 	if (sc->sc_type != WM_T_82580)
   17293 		return;
   17294 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17295 		return;
   17296 
   17297 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17298 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17299 	if (rv != 0) {
   17300 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17301 		    __func__);
   17302 		return;
   17303 	}
   17304 
   17305 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17306 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17307 		reg |= MDICNFG_DEST;
   17308 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17309 		reg |= MDICNFG_COM_MDIO;
   17310 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17311 }
   17312 
   17313 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17314 
   17315 static bool
   17316 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17317 {
   17318 	uint32_t reg;
   17319 	uint16_t id1, id2;
   17320 	int i, rv;
   17321 
   17322 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17323 		device_xname(sc->sc_dev), __func__));
   17324 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17325 
   17326 	id1 = id2 = 0xffff;
   17327 	for (i = 0; i < 2; i++) {
   17328 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17329 		    &id1);
   17330 		if ((rv != 0) || MII_INVALIDID(id1))
   17331 			continue;
   17332 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17333 		    &id2);
   17334 		if ((rv != 0) || MII_INVALIDID(id2))
   17335 			continue;
   17336 		break;
   17337 	}
   17338 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17339 		goto out;
   17340 
   17341 	/*
   17342 	 * In case the PHY needs to be in mdio slow mode,
   17343 	 * set slow mode and try to get the PHY id again.
   17344 	 */
   17345 	rv = 0;
   17346 	if (sc->sc_type < WM_T_PCH_LPT) {
   17347 		wm_set_mdio_slow_mode_hv_locked(sc);
   17348 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17349 		    &id1);
   17350 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17351 		    &id2);
   17352 	}
   17353 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17354 		device_printf(sc->sc_dev, "XXX return with false\n");
   17355 		return false;
   17356 	}
   17357 out:
   17358 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17359 		/* Only unforce SMBus if ME is not active */
   17360 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17361 			uint16_t phyreg;
   17362 
   17363 			/* Unforce SMBus mode in PHY */
   17364 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17365 			    CV_SMB_CTRL, &phyreg);
   17366 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17367 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17368 			    CV_SMB_CTRL, phyreg);
   17369 
   17370 			/* Unforce SMBus mode in MAC */
   17371 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17372 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17373 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17374 		}
   17375 	}
   17376 	return true;
   17377 }
   17378 
   17379 static void
   17380 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17381 {
   17382 	uint32_t reg;
   17383 	int i;
   17384 
   17385 	/* Set PHY Config Counter to 50msec */
   17386 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17387 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17388 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17389 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17390 
   17391 	/* Toggle LANPHYPC */
   17392 	reg = CSR_READ(sc, WMREG_CTRL);
   17393 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17394 	reg &= ~CTRL_LANPHYPC_VALUE;
   17395 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17396 	CSR_WRITE_FLUSH(sc);
   17397 	delay(1000);
   17398 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17399 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17400 	CSR_WRITE_FLUSH(sc);
   17401 
   17402 	if (sc->sc_type < WM_T_PCH_LPT)
   17403 		delay(50 * 1000);
   17404 	else {
   17405 		i = 20;
   17406 
   17407 		do {
   17408 			delay(5 * 1000);
   17409 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17410 		    && i--);
   17411 
   17412 		delay(30 * 1000);
   17413 	}
   17414 }
   17415 
   17416 static int
   17417 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17418 {
   17419 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17420 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17421 	uint32_t rxa;
   17422 	uint16_t scale = 0, lat_enc = 0;
   17423 	int32_t obff_hwm = 0;
   17424 	int64_t lat_ns, value;
   17425 
   17426 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17427 		device_xname(sc->sc_dev), __func__));
   17428 
   17429 	if (link) {
   17430 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17431 		uint32_t status;
   17432 		uint16_t speed;
   17433 		pcireg_t preg;
   17434 
   17435 		status = CSR_READ(sc, WMREG_STATUS);
   17436 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17437 		case STATUS_SPEED_10:
   17438 			speed = 10;
   17439 			break;
   17440 		case STATUS_SPEED_100:
   17441 			speed = 100;
   17442 			break;
   17443 		case STATUS_SPEED_1000:
   17444 			speed = 1000;
   17445 			break;
   17446 		default:
   17447 			device_printf(sc->sc_dev, "Unknown speed "
   17448 			    "(status = %08x)\n", status);
   17449 			return -1;
   17450 		}
   17451 
   17452 		/* Rx Packet Buffer Allocation size (KB) */
   17453 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17454 
   17455 		/*
   17456 		 * Determine the maximum latency tolerated by the device.
   17457 		 *
   17458 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17459 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17460 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17461 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17462 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17463 		 */
   17464 		lat_ns = ((int64_t)rxa * 1024 -
   17465 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17466 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17467 		if (lat_ns < 0)
   17468 			lat_ns = 0;
   17469 		else
   17470 			lat_ns /= speed;
   17471 		value = lat_ns;
   17472 
   17473 		while (value > LTRV_VALUE) {
   17474 			scale ++;
   17475 			value = howmany(value, __BIT(5));
   17476 		}
   17477 		if (scale > LTRV_SCALE_MAX) {
   17478 			device_printf(sc->sc_dev,
   17479 			    "Invalid LTR latency scale %d\n", scale);
   17480 			return -1;
   17481 		}
   17482 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17483 
   17484 		/* Determine the maximum latency tolerated by the platform */
   17485 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17486 		    WM_PCI_LTR_CAP_LPT);
   17487 		max_snoop = preg & 0xffff;
   17488 		max_nosnoop = preg >> 16;
   17489 
   17490 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17491 
   17492 		if (lat_enc > max_ltr_enc) {
   17493 			lat_enc = max_ltr_enc;
   17494 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17495 			    * PCI_LTR_SCALETONS(
   17496 				    __SHIFTOUT(lat_enc,
   17497 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17498 		}
   17499 
   17500 		if (lat_ns) {
   17501 			lat_ns *= speed * 1000;
   17502 			lat_ns /= 8;
   17503 			lat_ns /= 1000000000;
   17504 			obff_hwm = (int32_t)(rxa - lat_ns);
   17505 		}
   17506 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17507 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   17508 			    "(rxa = %d, lat_ns = %d)\n",
   17509 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17510 			return -1;
   17511 		}
   17512 	}
   17513 	/* Snoop and No-Snoop latencies the same */
   17514 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17515 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17516 
   17517 	/* Set OBFF high water mark */
   17518 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17519 	reg |= obff_hwm;
   17520 	CSR_WRITE(sc, WMREG_SVT, reg);
   17521 
   17522 	/* Enable OBFF */
   17523 	reg = CSR_READ(sc, WMREG_SVCR);
   17524 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17525 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17526 
   17527 	return 0;
   17528 }
   17529 
   17530 /*
   17531  * I210 Errata 25 and I211 Errata 10
   17532  * Slow System Clock.
   17533  *
   17534  * Note that this function is called on both FLASH and iNVM case on NetBSD.
   17535  */
   17536 static int
   17537 wm_pll_workaround_i210(struct wm_softc *sc)
   17538 {
   17539 	uint32_t mdicnfg, wuc;
   17540 	uint32_t reg;
   17541 	pcireg_t pcireg;
   17542 	uint32_t pmreg;
   17543 	uint16_t nvmword, tmp_nvmword;
   17544 	uint16_t phyval;
   17545 	bool wa_done = false;
   17546 	int i, rv = 0;
   17547 
   17548 	/* Get Power Management cap offset */
   17549 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17550 	    &pmreg, NULL) == 0)
   17551 		return -1;
   17552 
   17553 	/* Save WUC and MDICNFG registers */
   17554 	wuc = CSR_READ(sc, WMREG_WUC);
   17555 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17556 
   17557 	reg = mdicnfg & ~MDICNFG_DEST;
   17558 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17559 
   17560 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17561 		/*
   17562 		 * The default value of the Initialization Control Word 1
   17563 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17564 		 */
   17565 		nvmword = INVM_DEFAULT_AL;
   17566 	}
   17567 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17568 
   17569 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17570 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17571 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17572 
   17573 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17574 			rv = 0;
   17575 			break; /* OK */
   17576 		} else
   17577 			rv = -1;
   17578 
   17579 		wa_done = true;
   17580 		/* Directly reset the internal PHY */
   17581 		reg = CSR_READ(sc, WMREG_CTRL);
   17582 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17583 
   17584 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17585 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17586 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17587 
   17588 		CSR_WRITE(sc, WMREG_WUC, 0);
   17589 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17590 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17591 
   17592 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17593 		    pmreg + PCI_PMCSR);
   17594 		pcireg |= PCI_PMCSR_STATE_D3;
   17595 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17596 		    pmreg + PCI_PMCSR, pcireg);
   17597 		delay(1000);
   17598 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17599 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17600 		    pmreg + PCI_PMCSR, pcireg);
   17601 
   17602 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17603 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17604 
   17605 		/* Restore WUC register */
   17606 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17607 	}
   17608 
   17609 	/* Restore MDICNFG setting */
   17610 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17611 	if (wa_done)
   17612 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17613 	return rv;
   17614 }
   17615 
   17616 static void
   17617 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17618 {
   17619 	uint32_t reg;
   17620 
   17621 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17622 		device_xname(sc->sc_dev), __func__));
   17623 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17624 	    || (sc->sc_type == WM_T_PCH_CNP));
   17625 
   17626 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17627 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17628 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17629 
   17630 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17631 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17632 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17633 }
   17634 
   17635 /* Sysctl functions */
   17636 static int
   17637 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17638 {
   17639 	struct sysctlnode node = *rnode;
   17640 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17641 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17642 	struct wm_softc *sc = txq->txq_sc;
   17643 	uint32_t reg;
   17644 
   17645 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17646 	node.sysctl_data = &reg;
   17647 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17648 }
   17649 
   17650 static int
   17651 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17652 {
   17653 	struct sysctlnode node = *rnode;
   17654 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17655 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17656 	struct wm_softc *sc = txq->txq_sc;
   17657 	uint32_t reg;
   17658 
   17659 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17660 	node.sysctl_data = &reg;
   17661 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17662 }
   17663 
   17664 #ifdef WM_DEBUG
   17665 static int
   17666 wm_sysctl_debug(SYSCTLFN_ARGS)
   17667 {
   17668 	struct sysctlnode node = *rnode;
   17669 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17670 	uint32_t dflags;
   17671 	int error;
   17672 
   17673 	dflags = sc->sc_debug;
   17674 	node.sysctl_data = &dflags;
   17675 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17676 
   17677 	if (error || newp == NULL)
   17678 		return error;
   17679 
   17680 	sc->sc_debug = dflags;
   17681 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17682 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17683 
   17684 	return 0;
   17685 }
   17686 #endif
   17687