      1 /*	$NetBSD: if_wm.c,v 1.772 2023/05/11 07:04:06 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.772 2023/05/11 07:04:06 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_if_wm.h"
     89 #endif
     90 
     91 #include <sys/param.h>
     92 
     93 #include <sys/atomic.h>
     94 #include <sys/callout.h>
     95 #include <sys/cpu.h>
     96 #include <sys/device.h>
     97 #include <sys/errno.h>
     98 #include <sys/interrupt.h>
     99 #include <sys/ioctl.h>
    100 #include <sys/kernel.h>
    101 #include <sys/kmem.h>
    102 #include <sys/mbuf.h>
    103 #include <sys/pcq.h>
    104 #include <sys/queue.h>
    105 #include <sys/rndsource.h>
    106 #include <sys/socket.h>
    107 #include <sys/sysctl.h>
    108 #include <sys/syslog.h>
    109 #include <sys/systm.h>
    110 #include <sys/workqueue.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/mdio.h>
    133 #include <dev/mii/miivar.h>
    134 #include <dev/mii/miidevs.h>
    135 #include <dev/mii/mii_bitbang.h>
    136 #include <dev/mii/ikphyreg.h>
    137 #include <dev/mii/igphyreg.h>
    138 #include <dev/mii/igphyvar.h>
    139 #include <dev/mii/inbmphyreg.h>
    140 #include <dev/mii/ihphyreg.h>
    141 #include <dev/mii/makphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 
    160 #if 0
    161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    163 	WM_DEBUG_LOCK
    164 #endif
    165 
    166 #define	DPRINTF(sc, x, y)			  \
    167 	do {					  \
    168 		if ((sc)->sc_debug & (x))	  \
    169 			printf y;		  \
    170 	} while (0)
    171 #else
    172 #define	DPRINTF(sc, x, y)	__nothing
    173 #endif /* WM_DEBUG */
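/*
 * Illustrative only (not part of the driver): a typical DPRINTF call
 * site.  The message arguments must be wrapped in their own set of
 * parentheses because the macro pastes them directly after printf.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif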
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
    177 /*
     178  * The maximum number of interrupts this device driver supports.
    179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
    205  * We allow up to 64 DMA segments per packet.  Pathological packet
    206  * chains containing many small mbufs have been observed in zero-copy
    207  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    208  * m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
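/*
 * Because WM_NTXDESC() and WM_TXQUEUELEN() are powers of two, the
 * "next index" computations above reduce to a mask rather than a
 * modulo, e.g. with 4096 descriptors (4095 + 1) & 4095 == 0, which
 * wraps the ring for free.
 */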
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
     237  * Receive descriptor list size.  A normal-sized packet fits in a single
     238  * Rx buffer, while a full-sized (~9k) jumbo packet spans 5 of the 2k
     239  * (MCLBYTES) buffers.  We allocate 256 receive descriptors, each with a
     240  * 2k buffer, which gives us room for 50 jumbo packets (256 / 5).
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    305 #if !defined(WM_EVENT_COUNTERS)
    306 #define WM_EVENT_COUNTERS 1
    307 #endif
    308 #endif
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
    312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    313 	struct evcnt qname##_ev_##evname
    314 
    315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    316 	do {								\
    317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    319 		    "%s%02d%s", #qname, (qnum), #evname);		\
    320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    321 		    (evtype), NULL, (xname),				\
    322 		    (q)->qname##_##evname##_evcnt_name);		\
    323 	} while (0)
    324 
    325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    327 
    328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    330 
    331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    332 	evcnt_detach(&(q)->qname##_ev_##evname)
    333 #endif /* WM_EVENT_COUNTERS */
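/*
 * Sketch of what the macros above expand to (illustrative only):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txsstall)
 *	  -> char txq_txsstall_evcnt_name[sizeof("qname##XX##evname")];
 *	     struct evcnt txq_ev_txsstall;
 *
 * The ## operator is not applied inside the string literal, so the
 * array is simply sized by that 18-byte literal, which is large enough
 * for names such as "txq00txsstall" built by WM_Q_EVCNT_ATTACH()'s
 * "%s%02d%s" format.
 */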
    334 
    335 struct wm_txqueue {
    336 	kmutex_t *txq_lock;		/* lock for tx operations */
    337 
    338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    339 
    340 	/* Software state for the transmit descriptors. */
    341 	int txq_num;			/* must be a power of two */
    342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    343 
    344 	/* TX control data structures. */
    345 	int txq_ndesc;			/* must be a power of two */
     346 	size_t txq_descsize;		/* size of one Tx descriptor */
    347 	txdescs_t *txq_descs_u;
    348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     350 	int txq_desc_rseg;		/* real number of control segments */
    351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    352 #define	txq_descs	txq_descs_u->sctxu_txdescs
    353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    354 
    355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    356 
    357 	int txq_free;			/* number of free Tx descriptors */
    358 	int txq_next;			/* next ready Tx descriptor */
    359 
    360 	int txq_sfree;			/* number of free Tx jobs */
    361 	int txq_snext;			/* next free Tx job */
    362 	int txq_sdirty;			/* dirty Tx jobs */
    363 
    364 	/* These 4 variables are used only on the 82547. */
    365 	int txq_fifo_size;		/* Tx FIFO size */
    366 	int txq_fifo_head;		/* current head of FIFO */
    367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    369 
    370 	/*
    371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     372 	 * CPUs.  This queue mediates between them without blocking.
    373 	 */
    374 	pcq_t *txq_interq;
    375 
    376 	/*
     377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     378 	 * to manage the Tx H/W queue's busy flag.
    379 	 */
    380 	int txq_flags;			/* flags for H/W queue, see below */
    381 #define	WM_TXQ_NO_SPACE		0x1
    382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    383 
    384 	bool txq_stopping;
    385 
    386 	bool txq_sending;
    387 	time_t txq_lastsent;
    388 
    389 	/* Checksum flags used for previous packet */
    390 	uint32_t	txq_last_hw_cmd;
    391 	uint8_t		txq_last_hw_fields;
    392 	uint16_t	txq_last_hw_ipcs;
    393 	uint16_t	txq_last_hw_tucs;
    394 
    395 	uint32_t txq_packets;		/* for AIM */
    396 	uint32_t txq_bytes;		/* for AIM */
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* TX event counters */
    399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
    400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
    401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
    402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
    403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
    404 					    /* XXX not used? */
    405 
    406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
    407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
    408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
    409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
    410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
    411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
    412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
    413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
    414 					    /* other than toomanyseg */
    415 
    416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
    417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
    418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
    419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
    420 
    421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    423 #endif /* WM_EVENT_COUNTERS */
    424 };
    425 
    426 struct wm_rxqueue {
    427 	kmutex_t *rxq_lock;		/* lock for rx operations */
    428 
    429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    430 
    431 	/* Software state for the receive descriptors. */
    432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    433 
    434 	/* RX control data structures. */
    435 	int rxq_ndesc;			/* must be a power of two */
     436 	size_t rxq_descsize;		/* size of one Rx descriptor */
    437 	rxdescs_t *rxq_descs_u;
    438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     440 	int rxq_desc_rseg;		/* real number of control segments */
    441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    445 
    446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    447 
     448 	int rxq_ptr;			/* next ready Rx desc/queue entry */
    449 	int rxq_discard;
    450 	int rxq_len;
    451 	struct mbuf *rxq_head;
    452 	struct mbuf *rxq_tail;
    453 	struct mbuf **rxq_tailp;
    454 
    455 	bool rxq_stopping;
    456 
    457 	uint32_t rxq_packets;		/* for AIM */
    458 	uint32_t rxq_bytes;		/* for AIM */
    459 #ifdef WM_EVENT_COUNTERS
    460 	/* RX event counters */
    461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    463 
    464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    466 #endif
    467 };
    468 
    469 struct wm_queue {
    470 	int wmq_id;			/* index of TX/RX queues */
    471 	int wmq_intr_idx;		/* index of MSI-X tables */
    472 
    473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    474 	bool wmq_set_itr;
    475 
    476 	struct wm_txqueue wmq_txq;
    477 	struct wm_rxqueue wmq_rxq;
    478 	char sysctlname[32];		/* Name for sysctl */
    479 
    480 	bool wmq_txrx_use_workqueue;
    481 	bool wmq_wq_enqueued;
    482 	struct work wmq_cookie;
    483 	void *wmq_si;
    484 };
    485 
    486 struct wm_phyop {
    487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    488 	void (*release)(struct wm_softc *);
    489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    490 	int (*writereg_locked)(device_t, int, int, uint16_t);
    491 	int reset_delay_us;
    492 	bool no_errprint;
    493 };
    494 
    495 struct wm_nvmop {
    496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    497 	void (*release)(struct wm_softc *);
    498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    499 };
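/*
 * Sketch (illustrative only) of the call pattern these operation
 * vectors imply: take the chip-specific semaphore, perform the locked
 * access, then release.  The warn_unused_result attribute forces
 * callers to notice semaphore timeouts.
 */
#if 0
	uint16_t val;
	int rv;

	if (sc->phy.acquire(sc) != 0)
		return ETIMEDOUT;	/* could not get the semaphore */
	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
	sc->phy.release(sc);
#endif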
    500 
    501 /*
    502  * Software state per device.
    503  */
    504 struct wm_softc {
    505 	device_t sc_dev;		/* generic device information */
    506 	bus_space_tag_t sc_st;		/* bus space tag */
    507 	bus_space_handle_t sc_sh;	/* bus space handle */
    508 	bus_size_t sc_ss;		/* bus space size */
    509 	bus_space_tag_t sc_iot;		/* I/O space tag */
    510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    511 	bus_size_t sc_ios;		/* I/O space size */
    512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    514 	bus_size_t sc_flashs;		/* flash registers space size */
    515 	off_t sc_flashreg_offset;	/*
    516 					 * offset to flash registers from
    517 					 * start of BAR
    518 					 */
    519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    520 
    521 	struct ethercom sc_ethercom;	/* Ethernet common data */
    522 	struct mii_data sc_mii;		/* MII/media information */
    523 
    524 	pci_chipset_tag_t sc_pc;
    525 	pcitag_t sc_pcitag;
    526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    528 
    529 	uint16_t sc_pcidevid;		/* PCI device ID */
    530 	wm_chip_type sc_type;		/* MAC type */
    531 	int sc_rev;			/* MAC revision */
    532 	wm_phy_type sc_phytype;		/* PHY type */
    533 	uint8_t sc_sfptype;		/* SFP type */
    534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    535 #define	WM_MEDIATYPE_UNKNOWN		0x00
    536 #define	WM_MEDIATYPE_FIBER		0x01
    537 #define	WM_MEDIATYPE_COPPER		0x02
    538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    540 	int sc_flags;			/* flags; see below */
    541 	u_short sc_if_flags;		/* last if_flags */
    542 	int sc_ec_capenable;		/* last ec_capenable */
    543 	int sc_flowflags;		/* 802.3x flow control flags */
    544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    545 	int sc_align_tweak;
    546 
    547 	void *sc_ihs[WM_MAX_NINTR];	/*
    548 					 * interrupt cookie.
    549 					 * - legacy and msi use sc_ihs[0] only
    550 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    551 					 */
    552 	pci_intr_handle_t *sc_intrs;	/*
    553 					 * legacy and msi use sc_intrs[0] only
     554 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    555 					 */
    556 	int sc_nintrs;			/* number of interrupts */
    557 
    558 	int sc_link_intr_idx;		/* index of MSI-X tables */
    559 
    560 	callout_t sc_tick_ch;		/* tick callout */
    561 	bool sc_core_stopping;
    562 
    563 	int sc_nvm_ver_major;
    564 	int sc_nvm_ver_minor;
    565 	int sc_nvm_ver_build;
    566 	int sc_nvm_addrbits;		/* NVM address bits */
    567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    568 	int sc_ich8_flash_base;
    569 	int sc_ich8_flash_bank_size;
    570 	int sc_nvm_k1_enabled;
    571 
    572 	int sc_nqueues;
    573 	struct wm_queue *sc_queue;
    574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    578 	struct workqueue *sc_queue_wq;
    579 	bool sc_txrx_use_workqueue;
    580 
    581 	int sc_affinity_offset;
    582 
    583 #ifdef WM_EVENT_COUNTERS
    584 	/* Event counters. */
    585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    586 
    587 	/* >= WM_T_82542_2_1 */
    588 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    591 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
     592 	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC ctrl frames */
    593 
    594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
    595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
    596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
    597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
    598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
    599 	struct evcnt sc_ev_scc;		/* Single Collision */
    600 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
    601 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
    602 	struct evcnt sc_ev_latecol;	/* Late Collision */
    603 	struct evcnt sc_ev_colc;	/* Collision */
    604 	struct evcnt sc_ev_dc;		/* Defer */
    605 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
    606 	struct evcnt sc_ev_sec;		/* Sequence Error */
    607 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
    608 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
    609 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
    610 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
    611 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
     612 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
    613 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
    614 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
    615 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
    616 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
    617 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
    618 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
    619 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
    620 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
    621 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
    622 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
    623 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
    624 	struct evcnt sc_ev_roc;		/* Rx Oversize */
    625 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
    626 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
    627 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
    628 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
    629 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
    630 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
    631 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
    632 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
    633 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
    634 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
    635 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
    636 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
    637 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
     638 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
    639 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
    640 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
    641 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
    642 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
    643 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
    644 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
    645 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
    646 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
    647 	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
    648 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
    649 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
    650 	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
    651 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
    652 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
    653 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
    654 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
    655 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
    656 
    657 #endif /* WM_EVENT_COUNTERS */
    658 
    659 	struct sysctllog *sc_sysctllog;
    660 
     661 	/* This variable is used only on the 82547. */
    662 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    663 
    664 	uint32_t sc_ctrl;		/* prototype CTRL register */
    665 #if 0
    666 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    667 #endif
    668 	uint32_t sc_icr;		/* prototype interrupt bits */
    669 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    670 	uint32_t sc_tctl;		/* prototype TCTL register */
    671 	uint32_t sc_rctl;		/* prototype RCTL register */
    672 	uint32_t sc_txcw;		/* prototype TXCW register */
    673 	uint32_t sc_tipg;		/* prototype TIPG register */
    674 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    675 	uint32_t sc_pba;		/* prototype PBA register */
    676 
    677 	int sc_tbi_linkup;		/* TBI link status */
    678 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    679 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    680 
    681 	int sc_mchash_type;		/* multicast filter offset */
    682 
    683 	krndsource_t rnd_source;	/* random source */
    684 
    685 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    686 
    687 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    688 	kmutex_t *sc_ich_phymtx;	/*
    689 					 * 82574/82583/ICH/PCH specific PHY
    690 					 * mutex. For 82574/82583, the mutex
    691 					 * is used for both PHY and NVM.
    692 					 */
    693 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    694 
    695 	struct wm_phyop phy;
    696 	struct wm_nvmop nvm;
    697 
    698 	struct workqueue *sc_reset_wq;
    699 	struct work sc_reset_work;
    700 	volatile unsigned sc_reset_pending;
    701 
    702 	bool sc_dying;
    703 
    704 #ifdef WM_DEBUG
    705 	uint32_t sc_debug;
    706 	bool sc_trigger_reset;
    707 #endif
    708 };
    709 
    710 #define	WM_RXCHAIN_RESET(rxq)						\
    711 do {									\
    712 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    713 	*(rxq)->rxq_tailp = NULL;					\
    714 	(rxq)->rxq_len = 0;						\
    715 } while (/*CONSTCOND*/0)
    716 
    717 #define	WM_RXCHAIN_LINK(rxq, m)						\
    718 do {									\
    719 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    720 	(rxq)->rxq_tailp = &(m)->m_next;				\
    721 } while (/*CONSTCOND*/0)
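/*
 * rxq_tailp implements the usual O(1) tail-append idiom: after
 * WM_RXCHAIN_RESET() it points at rxq_head, so the first
 * WM_RXCHAIN_LINK() stores its mbuf there; every later link stores
 * into the previous mbuf's m_next, with no list traversal.
 */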
    722 
    723 #ifdef WM_EVENT_COUNTERS
    724 #ifdef __HAVE_ATOMIC64_LOADSTORE
    725 #define	WM_EVCNT_INCR(ev)						\
    726 	atomic_store_relaxed(&((ev)->ev_count),				\
    727 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    728 #define	WM_EVCNT_ADD(ev, val)						\
    729 	atomic_store_relaxed(&((ev)->ev_count),				\
    730 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    731 #else
    732 #define	WM_EVCNT_INCR(ev)						\
    733 	((ev)->ev_count)++
    734 #define	WM_EVCNT_ADD(ev, val)						\
    735 	(ev)->ev_count += (val)
    736 #endif
    737 
    738 #define WM_Q_EVCNT_INCR(qname, evname)			\
    739 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    740 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    741 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    742 #else /* !WM_EVENT_COUNTERS */
    743 #define	WM_EVCNT_INCR(ev)	/* nothing */
    744 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    745 
    746 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    747 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    748 #endif /* !WM_EVENT_COUNTERS */
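/*
 * For example, WM_Q_EVCNT_INCR(txq, defrag) expands to
 * WM_EVCNT_INCR(&(txq)->txq_ev_defrag).  Note that the LP64 variant is
 * a relaxed atomic load/store pair, not an atomic read-modify-write:
 * concurrent increments may occasionally be lost, but readers never
 * see a torn 64-bit value, which is the property that matters for
 * statistics.
 */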
    749 
    750 #define	CSR_READ(sc, reg)						\
    751 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    752 #define	CSR_WRITE(sc, reg, val)						\
    753 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    754 #define	CSR_WRITE_FLUSH(sc)						\
    755 	(void)CSR_READ((sc), WMREG_STATUS)
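/*
 * Sketch (illustrative only): PCI writes are posted, so a read from
 * the device is needed to force them out.  A typical sequence:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);		/* push the posted write to the chip */
#endif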
    756 
    757 #define ICH8_FLASH_READ32(sc, reg)					\
    758 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    759 	    (reg) + sc->sc_flashreg_offset)
    760 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    761 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    762 	    (reg) + sc->sc_flashreg_offset, (data))
    763 
    764 #define ICH8_FLASH_READ16(sc, reg)					\
    765 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    766 	    (reg) + sc->sc_flashreg_offset)
    767 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    768 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    769 	    (reg) + sc->sc_flashreg_offset, (data))
    770 
    771 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    772 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    773 
    774 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    775 #define	WM_CDTXADDR_HI(txq, x)						\
    776 	(sizeof(bus_addr_t) == 8 ?					\
    777 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    778 
    779 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    780 #define	WM_CDRXADDR_HI(rxq, x)						\
    781 	(sizeof(bus_addr_t) == 8 ?					\
    782 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
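/*
 * Sketch (illustrative only, register names per if_wmreg.h): how the
 * LO/HI halves above would typically be used to program a descriptor
 * base address.  When bus_addr_t is 32 bits wide, the sizeof() test
 * makes the _HI half a compile-time constant 0.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
#endif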
    783 
    784 /*
     785  * Register read/write functions other than CSR_{READ|WRITE}().
    787  */
    788 #if 0
    789 static inline uint32_t wm_io_read(struct wm_softc *, int);
    790 #endif
    791 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    792 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    793     uint32_t, uint32_t);
    794 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    795 
    796 /*
    797  * Descriptor sync/init functions.
    798  */
    799 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    800 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    801 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    802 
    803 /*
    804  * Device driver interface functions and commonly used functions.
    805  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    806  */
    807 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    808 static int	wm_match(device_t, cfdata_t, void *);
    809 static void	wm_attach(device_t, device_t, void *);
    810 static int	wm_detach(device_t, int);
    811 static bool	wm_suspend(device_t, const pmf_qual_t *);
    812 static bool	wm_resume(device_t, const pmf_qual_t *);
    813 static bool	wm_watchdog(struct ifnet *);
    814 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    815     uint16_t *);
    816 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    817     uint16_t *);
    818 static void	wm_tick(void *);
    819 static int	wm_ifflags_cb(struct ethercom *);
    820 static int	wm_ioctl(struct ifnet *, u_long, void *);
    821 /* MAC address related */
    822 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    823 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    824 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    825 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    826 static int	wm_rar_count(struct wm_softc *);
    827 static void	wm_set_filter(struct wm_softc *);
    828 /* Reset and init related */
    829 static void	wm_set_vlan(struct wm_softc *);
    830 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    831 static void	wm_get_auto_rd_done(struct wm_softc *);
    832 static void	wm_lan_init_done(struct wm_softc *);
    833 static void	wm_get_cfg_done(struct wm_softc *);
    834 static int	wm_phy_post_reset(struct wm_softc *);
    835 static int	wm_write_smbus_addr(struct wm_softc *);
    836 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    837 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    838 static void	wm_initialize_hardware_bits(struct wm_softc *);
    839 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    840 static int	wm_reset_phy(struct wm_softc *);
    841 static void	wm_flush_desc_rings(struct wm_softc *);
    842 static void	wm_reset(struct wm_softc *);
    843 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    844 static void	wm_rxdrain(struct wm_rxqueue *);
    845 static void	wm_init_rss(struct wm_softc *);
    846 static void	wm_adjust_qnum(struct wm_softc *, int);
    847 static inline bool	wm_is_using_msix(struct wm_softc *);
    848 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    849 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    850 static int	wm_setup_legacy(struct wm_softc *);
    851 static int	wm_setup_msix(struct wm_softc *);
    852 static int	wm_init(struct ifnet *);
    853 static int	wm_init_locked(struct ifnet *);
    854 static void	wm_init_sysctls(struct wm_softc *);
    855 static void	wm_unset_stopping_flags(struct wm_softc *);
    856 static void	wm_set_stopping_flags(struct wm_softc *);
    857 static void	wm_stop(struct ifnet *, int);
    858 static void	wm_stop_locked(struct ifnet *, bool, bool);
    859 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    860 static void	wm_82547_txfifo_stall(void *);
    861 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    862 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    863 /* DMA related */
    864 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    865 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    866 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    867 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    868     struct wm_txqueue *);
    869 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    870 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    871 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    872     struct wm_rxqueue *);
    873 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    874 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    875 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    876 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    877 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    878 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    879 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    880     struct wm_txqueue *);
    881 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    882     struct wm_rxqueue *);
    883 static int	wm_alloc_txrx_queues(struct wm_softc *);
    884 static void	wm_free_txrx_queues(struct wm_softc *);
    885 static int	wm_init_txrx_queues(struct wm_softc *);
    886 /* Start */
    887 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    888     struct wm_txsoft *, uint32_t *, uint8_t *);
    889 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    890 static void	wm_start(struct ifnet *);
    891 static void	wm_start_locked(struct ifnet *);
    892 static int	wm_transmit(struct ifnet *, struct mbuf *);
    893 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    894 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    895     bool);
    896 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    897     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    898 static void	wm_nq_start(struct ifnet *);
    899 static void	wm_nq_start_locked(struct ifnet *);
    900 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    901 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    902 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    903     bool);
    904 static void	wm_deferred_start_locked(struct wm_txqueue *);
    905 static void	wm_handle_queue(void *);
    906 static void	wm_handle_queue_work(struct work *, void *);
    907 static void	wm_handle_reset_work(struct work *, void *);
    908 /* Interrupt */
    909 static bool	wm_txeof(struct wm_txqueue *, u_int);
    910 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    911 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    912 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    913 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    914 static void	wm_linkintr(struct wm_softc *, uint32_t);
    915 static int	wm_intr_legacy(void *);
    916 static inline void	wm_txrxintr_disable(struct wm_queue *);
    917 static inline void	wm_txrxintr_enable(struct wm_queue *);
    918 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    919 static int	wm_txrxintr_msix(void *);
    920 static int	wm_linkintr_msix(void *);
    921 
    922 /*
    923  * Media related.
    924  * GMII, SGMII, TBI, SERDES and SFP.
    925  */
    926 /* Common */
    927 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    928 /* GMII related */
    929 static void	wm_gmii_reset(struct wm_softc *);
    930 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    931 static int	wm_get_phy_id_82575(struct wm_softc *);
    932 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    933 static int	wm_gmii_mediachange(struct ifnet *);
    934 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    935 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    936 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    937 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    938 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    939 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    940 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    941 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    942 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    943 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    944 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    945 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    946 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    947 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    948 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    949 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    950 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    951 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    952 	bool);
    953 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    954 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    955 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    956 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    957 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    958 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    959 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    960 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    961 static void	wm_gmii_statchg(struct ifnet *);
    962 /*
    963  * kumeran related (80003, ICH* and PCH*).
    964  * These functions are not for accessing MII registers but for accessing
    965  * kumeran specific registers.
    966  */
    967 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    968 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    969 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    970 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    971 /* EMI register related */
    972 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    973 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    974 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    975 /* SGMII */
    976 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    977 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    978 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    979 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    980 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    981 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    982 /* TBI related */
    983 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    984 static void	wm_tbi_mediainit(struct wm_softc *);
    985 static int	wm_tbi_mediachange(struct ifnet *);
    986 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    987 static int	wm_check_for_link(struct wm_softc *);
    988 static void	wm_tbi_tick(struct wm_softc *);
    989 /* SERDES related */
    990 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    991 static int	wm_serdes_mediachange(struct ifnet *);
    992 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    993 static void	wm_serdes_tick(struct wm_softc *);
    994 /* SFP related */
    995 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    996 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    997 
    998 /*
    999  * NVM related.
    1000  * Microwire, SPI (with or without EERD) and Flash.
   1001  */
   1002 /* Misc functions */
   1003 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
   1004 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
   1005 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
   1006 /* Microwire */
   1007 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
   1008 /* SPI */
   1009 static int	wm_nvm_ready_spi(struct wm_softc *);
   1010 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   1011 /* Using with EERD */
   1012 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
   1013 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
   1014 /* Flash */
   1015 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
   1016     unsigned int *);
   1017 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
   1018 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
   1019 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
   1020     uint32_t *);
   1021 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
   1022 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
   1023 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
   1024 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
   1025 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
   1026 /* iNVM */
   1027 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
   1028 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   1029 /* Lock, detecting NVM type, validate checksum and read */
   1030 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
   1031 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
   1032 static int	wm_nvm_validate_checksum(struct wm_softc *);
   1033 static void	wm_nvm_version_invm(struct wm_softc *);
   1034 static void	wm_nvm_version(struct wm_softc *);
   1035 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
   1036 
   1037 /*
   1038  * Hardware semaphores.
    1039  * Very complex...
   1040  */
   1041 static int	wm_get_null(struct wm_softc *);
   1042 static void	wm_put_null(struct wm_softc *);
   1043 static int	wm_get_eecd(struct wm_softc *);
   1044 static void	wm_put_eecd(struct wm_softc *);
   1045 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
   1046 static void	wm_put_swsm_semaphore(struct wm_softc *);
   1047 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
   1048 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
   1049 static int	wm_get_nvm_80003(struct wm_softc *);
   1050 static void	wm_put_nvm_80003(struct wm_softc *);
   1051 static int	wm_get_nvm_82571(struct wm_softc *);
   1052 static void	wm_put_nvm_82571(struct wm_softc *);
   1053 static int	wm_get_phy_82575(struct wm_softc *);
   1054 static void	wm_put_phy_82575(struct wm_softc *);
   1055 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1056 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1057 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1058 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1059 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1060 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1061 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1062 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1063 
   1064 /*
   1065  * Management mode and power management related subroutines.
   1066  * BMC, AMT, suspend/resume and EEE.
   1067  */
   1068 #if 0
   1069 static int	wm_check_mng_mode(struct wm_softc *);
   1070 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1071 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1072 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1073 #endif
   1074 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1075 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1076 static void	wm_get_hw_control(struct wm_softc *);
   1077 static void	wm_release_hw_control(struct wm_softc *);
   1078 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1079 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1080 static void	wm_init_manageability(struct wm_softc *);
   1081 static void	wm_release_manageability(struct wm_softc *);
   1082 static void	wm_get_wakeup(struct wm_softc *);
   1083 static int	wm_ulp_disable(struct wm_softc *);
   1084 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1085 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1086 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1087 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1088 static void	wm_enable_wakeup(struct wm_softc *);
   1089 static void	wm_disable_aspm(struct wm_softc *);
   1090 /* LPLU (Low Power Link Up) */
   1091 static void	wm_lplu_d0_disable(struct wm_softc *);
   1092 /* EEE */
   1093 static int	wm_set_eee_i350(struct wm_softc *);
   1094 static int	wm_set_eee_pchlan(struct wm_softc *);
   1095 static int	wm_set_eee(struct wm_softc *);
   1096 
   1097 /*
   1098  * Workarounds (mainly PHY related).
   1099  * Basically, PHY's workarounds are in the PHY drivers.
   1100  */
   1101 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1102 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1103 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1104 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1105 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1106 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1107 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1108 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1109 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1110 static int	wm_k1_workaround_lv(struct wm_softc *);
   1111 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1112 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1113 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
   1114 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1115 static void	wm_reset_init_script_82575(struct wm_softc *);
   1116 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1117 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1118 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1119 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1120 static int	wm_pll_workaround_i210(struct wm_softc *);
   1121 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1122 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1123 static void	wm_set_linkdown_discard(struct wm_softc *);
   1124 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1125 
   1126 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1127 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1128 #ifdef WM_DEBUG
   1129 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1130 #endif
   1131 
   1132 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1133     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1134 
   1135 /*
   1136  * Devices supported by this driver.
   1137  */
   1138 static const struct wm_product {
   1139 	pci_vendor_id_t		wmp_vendor;
   1140 	pci_product_id_t	wmp_product;
   1141 	const char		*wmp_name;
   1142 	wm_chip_type		wmp_type;
   1143 	uint32_t		wmp_flags;
   1144 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1145 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1146 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1147 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1148 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1149 } wm_products[] = {
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1151 	  "Intel i82542 1000BASE-X Ethernet",
   1152 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1155 	  "Intel i82543GC 1000BASE-X Ethernet",
   1156 	  WM_T_82543,		WMP_F_FIBER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1159 	  "Intel i82543GC 1000BASE-T Ethernet",
   1160 	  WM_T_82543,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1163 	  "Intel i82544EI 1000BASE-T Ethernet",
   1164 	  WM_T_82544,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1167 	  "Intel i82544EI 1000BASE-X Ethernet",
   1168 	  WM_T_82544,		WMP_F_FIBER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1171 	  "Intel i82544GC 1000BASE-T Ethernet",
   1172 	  WM_T_82544,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1175 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1176 	  WM_T_82544,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1179 	  "Intel i82540EM 1000BASE-T Ethernet",
   1180 	  WM_T_82540,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1183 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1184 	  WM_T_82540,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1187 	  "Intel i82540EP 1000BASE-T Ethernet",
   1188 	  WM_T_82540,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1191 	  "Intel i82540EP 1000BASE-T Ethernet",
   1192 	  WM_T_82540,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1195 	  "Intel i82540EP 1000BASE-T Ethernet",
   1196 	  WM_T_82540,		WMP_F_COPPER },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1199 	  "Intel i82545EM 1000BASE-T Ethernet",
   1200 	  WM_T_82545,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1203 	  "Intel i82545GM 1000BASE-T Ethernet",
   1204 	  WM_T_82545_3,		WMP_F_COPPER },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1207 	  "Intel i82545GM 1000BASE-X Ethernet",
   1208 	  WM_T_82545_3,		WMP_F_FIBER },
   1209 
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1211 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1212 	  WM_T_82545_3,		WMP_F_SERDES },
   1213 
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1215 	  "Intel i82546EB 1000BASE-T Ethernet",
   1216 	  WM_T_82546,		WMP_F_COPPER },
   1217 
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1219 	  "Intel i82546EB 1000BASE-T Ethernet",
   1220 	  WM_T_82546,		WMP_F_COPPER },
   1221 
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1223 	  "Intel i82545EM 1000BASE-X Ethernet",
   1224 	  WM_T_82545,		WMP_F_FIBER },
   1225 
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1227 	  "Intel i82546EB 1000BASE-X Ethernet",
   1228 	  WM_T_82546,		WMP_F_FIBER },
   1229 
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1231 	  "Intel i82546GB 1000BASE-T Ethernet",
   1232 	  WM_T_82546_3,		WMP_F_COPPER },
   1233 
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1235 	  "Intel i82546GB 1000BASE-X Ethernet",
   1236 	  WM_T_82546_3,		WMP_F_FIBER },
   1237 
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1239 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1240 	  WM_T_82546_3,		WMP_F_SERDES },
   1241 
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1243 	  "i82546GB quad-port Gigabit Ethernet",
   1244 	  WM_T_82546_3,		WMP_F_COPPER },
   1245 
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1247 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1248 	  WM_T_82546_3,		WMP_F_COPPER },
   1249 
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1251 	  "Intel PRO/1000MT (82546GB)",
   1252 	  WM_T_82546_3,		WMP_F_COPPER },
   1253 
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1255 	  "Intel i82541EI 1000BASE-T Ethernet",
   1256 	  WM_T_82541,		WMP_F_COPPER },
   1257 
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1259 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1260 	  WM_T_82541,		WMP_F_COPPER },
   1261 
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1263 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1264 	  WM_T_82541,		WMP_F_COPPER },
   1265 
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1267 	  "Intel i82541ER 1000BASE-T Ethernet",
   1268 	  WM_T_82541_2,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1271 	  "Intel i82541GI 1000BASE-T Ethernet",
   1272 	  WM_T_82541_2,		WMP_F_COPPER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1275 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1276 	  WM_T_82541_2,		WMP_F_COPPER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1279 	  "Intel i82541PI 1000BASE-T Ethernet",
   1280 	  WM_T_82541_2,		WMP_F_COPPER },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1283 	  "Intel i82547EI 1000BASE-T Ethernet",
   1284 	  WM_T_82547,		WMP_F_COPPER },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1287 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1288 	  WM_T_82547,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1291 	  "Intel i82547GI 1000BASE-T Ethernet",
   1292 	  WM_T_82547_2,		WMP_F_COPPER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1295 	  "Intel PRO/1000 PT (82571EB)",
   1296 	  WM_T_82571,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1299 	  "Intel PRO/1000 PF (82571EB)",
   1300 	  WM_T_82571,		WMP_F_FIBER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1303 	  "Intel PRO/1000 PB (82571EB)",
   1304 	  WM_T_82571,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1307 	  "Intel PRO/1000 QT (82571EB)",
   1308 	  WM_T_82571,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1311 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1312 	  WM_T_82571,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1315 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1316 	  WM_T_82571,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1319 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1320 	  WM_T_82571,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1323 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1324 	  WM_T_82571,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1327 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1328 	  WM_T_82571,		WMP_F_FIBER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1331 	  "Intel i82572EI 1000baseT Ethernet",
   1332 	  WM_T_82572,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1335 	  "Intel i82572EI 1000baseX Ethernet",
   1336 	  WM_T_82572,		WMP_F_FIBER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1339 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1340 	  WM_T_82572,		WMP_F_SERDES },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1343 	  "Intel i82572EI 1000baseT Ethernet",
   1344 	  WM_T_82572,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1347 	  "Intel i82573E",
   1348 	  WM_T_82573,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1351 	  "Intel i82573E IAMT",
   1352 	  WM_T_82573,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1355 	  "Intel i82573L Gigabit Ethernet",
   1356 	  WM_T_82573,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1359 	  "Intel i82574L",
   1360 	  WM_T_82574,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1363 	  "Intel i82574L",
   1364 	  WM_T_82574,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1367 	  "Intel i82583V",
   1368 	  WM_T_82583,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1371 	  "i80003 dual 1000baseT Ethernet",
   1372 	  WM_T_80003,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1375 	  "i80003 dual 1000baseX Ethernet",
   1376 	  WM_T_80003,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1379 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1380 	  WM_T_80003,		WMP_F_SERDES },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1383 	  "Intel i80003 1000baseT Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1387 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1388 	  WM_T_80003,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1391 	  "Intel i82801H (M_AMT) LAN Controller",
   1392 	  WM_T_ICH8,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1394 	  "Intel i82801H (AMT) LAN Controller",
   1395 	  WM_T_ICH8,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1397 	  "Intel i82801H LAN Controller",
   1398 	  WM_T_ICH8,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1400 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1401 	  WM_T_ICH8,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1403 	  "Intel i82801H (M) LAN Controller",
   1404 	  WM_T_ICH8,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1406 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1407 	  WM_T_ICH8,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1409 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1412 	  "82567V-3 LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1415 	  "82801I (AMT) LAN Controller",
   1416 	  WM_T_ICH9,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1418 	  "82801I 10/100 LAN Controller",
   1419 	  WM_T_ICH9,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1421 	  "82801I (G) 10/100 LAN Controller",
   1422 	  WM_T_ICH9,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1424 	  "82801I (GT) 10/100 LAN Controller",
   1425 	  WM_T_ICH9,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1427 	  "82801I (C) LAN Controller",
   1428 	  WM_T_ICH9,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1430 	  "82801I mobile LAN Controller",
   1431 	  WM_T_ICH9,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1433 	  "82801I mobile (V) LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1436 	  "82801I mobile (AMT) LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1439 	  "82567LM-4 LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1442 	  "82567LM-2 LAN Controller",
   1443 	  WM_T_ICH10,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1445 	  "82567LF-2 LAN Controller",
   1446 	  WM_T_ICH10,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1448 	  "82567LM-3 LAN Controller",
   1449 	  WM_T_ICH10,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1451 	  "82567LF-3 LAN Controller",
   1452 	  WM_T_ICH10,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1454 	  "82567V-2 LAN Controller",
   1455 	  WM_T_ICH10,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1457 	  "82567V-3? LAN Controller",
   1458 	  WM_T_ICH10,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1460 	  "HANKSVILLE LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1463 	  "PCH LAN (82577LM) Controller",
   1464 	  WM_T_PCH,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1466 	  "PCH LAN (82577LC) Controller",
   1467 	  WM_T_PCH,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1469 	  "PCH LAN (82578DM) Controller",
   1470 	  WM_T_PCH,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1472 	  "PCH LAN (82578DC) Controller",
   1473 	  WM_T_PCH,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1475 	  "PCH2 LAN (82579LM) Controller",
   1476 	  WM_T_PCH2,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1478 	  "PCH2 LAN (82579V) Controller",
   1479 	  WM_T_PCH2,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1481 	  "82575EB dual-1000baseT Ethernet",
   1482 	  WM_T_82575,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1484 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1485 	  WM_T_82575,		WMP_F_SERDES },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1487 	  "82575GB quad-1000baseT Ethernet",
   1488 	  WM_T_82575,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1490 	  "82575GB quad-1000baseT Ethernet (PM)",
   1491 	  WM_T_82575,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1493 	  "82576 1000BaseT Ethernet",
   1494 	  WM_T_82576,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1496 	  "82576 1000BaseX Ethernet",
   1497 	  WM_T_82576,		WMP_F_FIBER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1500 	  "82576 gigabit Ethernet (SERDES)",
   1501 	  WM_T_82576,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1504 	  "82576 quad-1000BaseT Ethernet",
   1505 	  WM_T_82576,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1508 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1509 	  WM_T_82576,		WMP_F_COPPER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1512 	  "82576 gigabit Ethernet",
   1513 	  WM_T_82576,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1516 	  "82576 gigabit Ethernet (SERDES)",
   1517 	  WM_T_82576,		WMP_F_SERDES },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1519 	  "82576 quad-gigabit Ethernet (SERDES)",
   1520 	  WM_T_82576,		WMP_F_SERDES },
   1521 
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1523 	  "82580 1000BaseT Ethernet",
   1524 	  WM_T_82580,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1526 	  "82580 1000BaseX Ethernet",
   1527 	  WM_T_82580,		WMP_F_FIBER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1530 	  "82580 1000BaseT Ethernet (SERDES)",
   1531 	  WM_T_82580,		WMP_F_SERDES },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1534 	  "82580 gigabit Ethernet (SGMII)",
   1535 	  WM_T_82580,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1537 	  "82580 dual-1000BaseT Ethernet",
   1538 	  WM_T_82580,		WMP_F_COPPER },
   1539 
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1541 	  "82580 quad-1000BaseX Ethernet",
   1542 	  WM_T_82580,		WMP_F_FIBER },
   1543 
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1545 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1546 	  WM_T_82580,		WMP_F_COPPER },
   1547 
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1549 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1550 	  WM_T_82580,		WMP_F_SERDES },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1553 	  "DH89XXCC 1000BASE-KX Ethernet",
   1554 	  WM_T_82580,		WMP_F_SERDES },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1557 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1558 	  WM_T_82580,		WMP_F_SERDES },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1561 	  "I350 Gigabit Network Connection",
   1562 	  WM_T_I350,		WMP_F_COPPER },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1565 	  "I350 Gigabit Fiber Network Connection",
   1566 	  WM_T_I350,		WMP_F_FIBER },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1569 	  "I350 Gigabit Backplane Connection",
   1570 	  WM_T_I350,		WMP_F_SERDES },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1573 	  "I350 Quad Port Gigabit Ethernet",
   1574 	  WM_T_I350,		WMP_F_SERDES },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1577 	  "I350 Gigabit Connection",
   1578 	  WM_T_I350,		WMP_F_COPPER },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1581 	  "I354 Gigabit Ethernet (KX)",
   1582 	  WM_T_I354,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1585 	  "I354 Gigabit Ethernet (SGMII)",
   1586 	  WM_T_I354,		WMP_F_COPPER },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1589 	  "I354 Gigabit Ethernet (2.5G)",
   1590 	  WM_T_I354,		WMP_F_COPPER },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1593 	  "I210-T1 Ethernet Server Adapter",
   1594 	  WM_T_I210,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1597 	  "I210 Ethernet (Copper OEM)",
   1598 	  WM_T_I210,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1601 	  "I210 Ethernet (Copper IT)",
   1602 	  WM_T_I210,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1605 	  "I210 Ethernet (Copper, FLASH less)",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1609 	  "I210 Gigabit Ethernet (Fiber)",
   1610 	  WM_T_I210,		WMP_F_FIBER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1613 	  "I210 Gigabit Ethernet (SERDES)",
   1614 	  WM_T_I210,		WMP_F_SERDES },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1617 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1618 	  WM_T_I210,		WMP_F_SERDES },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1621 	  "I210 Gigabit Ethernet (SGMII)",
   1622 	  WM_T_I210,		WMP_F_COPPER },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1625 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1626 	  WM_T_I210,		WMP_F_COPPER },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1629 	  "I211 Ethernet (COPPER)",
   1630 	  WM_T_I211,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1632 	  "I217 V Ethernet Connection",
   1633 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1635 	  "I217 LM Ethernet Connection",
   1636 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1638 	  "I218 V Ethernet Connection",
   1639 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1641 	  "I218 V Ethernet Connection",
   1642 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1644 	  "I218 V Ethernet Connection",
   1645 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1647 	  "I218 LM Ethernet Connection",
   1648 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1650 	  "I218 LM Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1653 	  "I218 LM Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1656 	  "I219 LM Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1659 	  "I219 LM (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1662 	  "I219 LM (3) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1665 	  "I219 LM (4) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1668 	  "I219 LM (5) Ethernet Connection",
   1669 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1671 	  "I219 LM (6) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1674 	  "I219 LM (7) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1677 	  "I219 LM (8) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1680 	  "I219 LM (9) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1683 	  "I219 LM (10) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1686 	  "I219 LM (11) Ethernet Connection",
   1687 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1689 	  "I219 LM (12) Ethernet Connection",
   1690 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1692 	  "I219 LM (13) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1695 	  "I219 LM (14) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1698 	  "I219 LM (15) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1701 	  "I219 LM (16) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1704 	  "I219 LM (17) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1707 	  "I219 LM (18) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1710 	  "I219 LM (19) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1713 	  "I219 V Ethernet Connection",
   1714 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1716 	  "I219 V (2) Ethernet Connection",
   1717 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1719 	  "I219 V (4) Ethernet Connection",
   1720 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1722 	  "I219 V (5) Ethernet Connection",
   1723 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1725 	  "I219 V (6) Ethernet Connection",
   1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1728 	  "I219 V (7) Ethernet Connection",
   1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1731 	  "I219 V (8) Ethernet Connection",
   1732 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1734 	  "I219 V (9) Ethernet Connection",
   1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1737 	  "I219 V (10) Ethernet Connection",
   1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1740 	  "I219 V (11) Ethernet Connection",
   1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1743 	  "I219 V (12) Ethernet Connection",
   1744 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1746 	  "I219 V (13) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1749 	  "I219 V (14) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1752 	  "I219 V (15) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1755 	  "I219 V (16) Ethernet Connection",
   1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1758 	  "I219 V (17) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1761 	  "I219 V (18) Ethernet Connection",
   1762 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1764 	  "I219 V (19) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ 0,			0,
   1767 	  NULL,
   1768 	  0,			0 },
   1769 };
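
/*
 * The all-zero sentinel entry above terminates the table: wm_lookup()
 * below scans until it reaches the entry whose wmp_name is NULL.
 */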
   1770 
   1771 /*
   1772  * Register read/write functions.
   1773  * Other than CSR_{READ|WRITE}().
   1774  */
   1775 
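/*
 * wm_io_read()/wm_io_write() use the chip's indirect I/O window: the
 * register offset is written to the address port at I/O-space offset 0,
 * and the data is then transferred through the data port at offset 4.
 */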
   1776 #if 0 /* Not currently used */
   1777 static inline uint32_t
   1778 wm_io_read(struct wm_softc *sc, int reg)
   1779 {
   1780 
   1781 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1782 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1783 }
   1784 #endif
   1785 
   1786 static inline void
   1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1788 {
   1789 
   1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1791 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1792 }
   1793 
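/*
 * Write an 8-bit value to an indirected controller register: the data
 * and the target offset are packed into a single SCTL-style register
 * write, then we poll (5us per iteration, up to SCTL_CTL_POLL_TIMEOUT
 * tries) for the READY bit before warning that the write may not have
 * completed.
 */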
   1794 static inline void
   1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1796     uint32_t data)
   1797 {
   1798 	uint32_t regval;
   1799 	int i;
   1800 
   1801 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1802 
   1803 	CSR_WRITE(sc, reg, regval);
   1804 
   1805 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1806 		delay(5);
   1807 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1808 			break;
   1809 	}
   1810 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1811 		aprint_error("%s: WARNING:"
   1812 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1813 		    device_xname(sc->sc_dev), reg);
   1814 	}
   1815 }
   1816 
   1817 static inline void
   1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1819 {
   1820 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1821 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1822 }
   1823 
   1824 /*
   1825  * Descriptor sync/init functions.
   1826  */
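/*
 * For example, on a 256-descriptor Tx ring, wm_cdtxsync(txq, 250, 10,
 * ops) below syncs descriptors 250-255 with one bus_dmamap_sync() call
 * and descriptors 0-3 with a second, because the range wraps around the
 * end of the ring.
 */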
   1827 static inline void
   1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1829 {
   1830 	struct wm_softc *sc = txq->txq_sc;
   1831 
   1832 	/* If it will wrap around, sync to the end of the ring. */
   1833 	if ((start + num) > WM_NTXDESC(txq)) {
   1834 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1835 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1836 		    (WM_NTXDESC(txq) - start), ops);
   1837 		num -= (WM_NTXDESC(txq) - start);
   1838 		start = 0;
   1839 	}
   1840 
   1841 	/* Now sync whatever is left. */
   1842 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1843 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1844 }
   1845 
   1846 static inline void
   1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1848 {
   1849 	struct wm_softc *sc = rxq->rxq_sc;
   1850 
   1851 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1852 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1853 }
   1854 
   1855 static inline void
   1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1857 {
   1858 	struct wm_softc *sc = rxq->rxq_sc;
   1859 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1860 	struct mbuf *m = rxs->rxs_mbuf;
   1861 
   1862 	/*
   1863 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1864 	 * so that the payload after the Ethernet header is aligned
   1865 	 * to a 4-byte boundary.
    1866 	 *
   1867 	 * XXX BRAINDAMAGE ALERT!
   1868 	 * The stupid chip uses the same size for every buffer, which
   1869 	 * is set in the Receive Control register.  We are using the 2K
   1870 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1871 	 * reason, we can't "scoot" packets longer than the standard
   1872 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1873 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1874 	 * the upper layer copy the headers.
   1875 	 */
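	/*
	 * For example, with align_tweak = 2 the 14-byte Ethernet header
	 * starts at offset 2, so the payload begins at offset 16, which
	 * is 4-byte aligned.
	 */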
   1876 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1877 
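	/*
	 * Three Rx descriptor layouts are initialized below: the 82574's
	 * extended format, the "new queue" (82575 and later) advanced
	 * format, and the legacy wiseman format for everything else.
	 */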
   1878 	if (sc->sc_type == WM_T_82574) {
   1879 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1880 		rxd->erx_data.erxd_addr =
   1881 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1882 		rxd->erx_data.erxd_dd = 0;
   1883 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1884 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1885 
   1886 		rxd->nqrx_data.nrxd_paddr =
   1887 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1888 		/* Currently, split header is not supported. */
   1889 		rxd->nqrx_data.nrxd_haddr = 0;
   1890 	} else {
   1891 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1892 
   1893 		wm_set_dma_addr(&rxd->wrx_addr,
   1894 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1895 		rxd->wrx_len = 0;
   1896 		rxd->wrx_cksum = 0;
   1897 		rxd->wrx_status = 0;
   1898 		rxd->wrx_errors = 0;
   1899 		rxd->wrx_special = 0;
   1900 	}
   1901 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1902 
   1903 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1904 }
   1905 
   1906 /*
   1907  * Device driver interface functions and commonly used functions.
   1908  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1909  */
   1910 
    1911 /* Look up the device in the supported-device table */
   1912 static const struct wm_product *
   1913 wm_lookup(const struct pci_attach_args *pa)
   1914 {
   1915 	const struct wm_product *wmp;
   1916 
   1917 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1918 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1919 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1920 			return wmp;
   1921 	}
   1922 	return NULL;
   1923 }
   1924 
   1925 /* The match function (ca_match) */
   1926 static int
   1927 wm_match(device_t parent, cfdata_t cf, void *aux)
   1928 {
   1929 	struct pci_attach_args *pa = aux;
   1930 
   1931 	if (wm_lookup(pa) != NULL)
   1932 		return 1;
   1933 
   1934 	return 0;
   1935 }
   1936 
   1937 /* The attach function (ca_attach) */
   1938 static void
   1939 wm_attach(device_t parent, device_t self, void *aux)
   1940 {
   1941 	struct wm_softc *sc = device_private(self);
   1942 	struct pci_attach_args *pa = aux;
   1943 	prop_dictionary_t dict;
   1944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1945 	pci_chipset_tag_t pc = pa->pa_pc;
   1946 	int counts[PCI_INTR_TYPE_SIZE];
   1947 	pci_intr_type_t max_type;
   1948 	const char *eetype, *xname;
   1949 	bus_space_tag_t memt;
   1950 	bus_space_handle_t memh;
   1951 	bus_size_t memsize;
   1952 	int memh_valid;
   1953 	int i, error;
   1954 	const struct wm_product *wmp;
   1955 	prop_data_t ea;
   1956 	prop_number_t pn;
   1957 	uint8_t enaddr[ETHER_ADDR_LEN];
   1958 	char buf[256];
   1959 	char wqname[MAXCOMLEN];
   1960 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1961 	pcireg_t preg, memtype;
   1962 	uint16_t eeprom_data, apme_mask;
   1963 	bool force_clear_smbi;
   1964 	uint32_t link_mode;
   1965 	uint32_t reg;
   1966 
   1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1968 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1969 #endif
   1970 	sc->sc_dev = self;
   1971 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   1972 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1973 	sc->sc_core_stopping = false;
   1974 
   1975 	wmp = wm_lookup(pa);
   1976 #ifdef DIAGNOSTIC
   1977 	if (wmp == NULL) {
   1978 		printf("\n");
   1979 		panic("wm_attach: impossible");
   1980 	}
   1981 #endif
   1982 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1983 
   1984 	sc->sc_pc = pa->pa_pc;
   1985 	sc->sc_pcitag = pa->pa_tag;
   1986 
   1987 	if (pci_dma64_available(pa)) {
   1988 		aprint_verbose(", 64-bit DMA");
   1989 		sc->sc_dmat = pa->pa_dmat64;
   1990 	} else {
   1991 		aprint_verbose(", 32-bit DMA");
   1992 		sc->sc_dmat = pa->pa_dmat;
   1993 	}
   1994 
   1995 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1996 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1997 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1998 
   1999 	sc->sc_type = wmp->wmp_type;
   2000 
   2001 	/* Set default function pointers */
   2002 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2003 	sc->phy.release = sc->nvm.release = wm_put_null;
   2004 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2005 
   2006 	if (sc->sc_type < WM_T_82543) {
   2007 		if (sc->sc_rev < 2) {
   2008 			aprint_error_dev(sc->sc_dev,
   2009 			    "i82542 must be at least rev. 2\n");
   2010 			return;
   2011 		}
   2012 		if (sc->sc_rev < 3)
   2013 			sc->sc_type = WM_T_82542_2_0;
   2014 	}
   2015 
   2016 	/*
   2017 	 * Disable MSI for Errata:
   2018 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2019 	 *
   2020 	 *  82544: Errata 25
   2021 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2022 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2023 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2024 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2025 	 *
   2026 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2027 	 *
   2028 	 *  82571 & 82572: Errata 63
   2029 	 */
   2030 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2031 	    || (sc->sc_type == WM_T_82572))
   2032 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2033 
   2034 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2035 	    || (sc->sc_type == WM_T_82580)
   2036 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2037 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2038 		sc->sc_flags |= WM_F_NEWQUEUE;
   2039 
   2040 	/* Set device properties (mactype) */
   2041 	dict = device_properties(sc->sc_dev);
   2042 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2043 
   2044 	/*
    2045 	 * Map the device.  All devices support memory-mapped access,
   2046 	 * and it is really required for normal operation.
   2047 	 */
   2048 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2049 	switch (memtype) {
   2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2051 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2052 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2053 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2054 		break;
   2055 	default:
   2056 		memh_valid = 0;
   2057 		break;
   2058 	}
   2059 
   2060 	if (memh_valid) {
   2061 		sc->sc_st = memt;
   2062 		sc->sc_sh = memh;
   2063 		sc->sc_ss = memsize;
   2064 	} else {
   2065 		aprint_error_dev(sc->sc_dev,
   2066 		    "unable to map device registers\n");
   2067 		return;
   2068 	}
   2069 
   2070 	/*
   2071 	 * In addition, i82544 and later support I/O mapped indirect
   2072 	 * register access.  It is not desirable (nor supported in
   2073 	 * this driver) to use it for normal operation, though it is
   2074 	 * required to work around bugs in some chip versions.
   2075 	 */
   2076 	switch (sc->sc_type) {
   2077 	case WM_T_82544:
   2078 	case WM_T_82541:
   2079 	case WM_T_82541_2:
   2080 	case WM_T_82547:
   2081 	case WM_T_82547_2:
   2082 		/* First we have to find the I/O BAR. */
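		/*
		 * Walk the BARs four bytes at a time; a 64-bit memory
		 * BAR occupies two slots, so its high half is skipped
		 * below.
		 */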
   2083 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2084 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2085 			if (memtype == PCI_MAPREG_TYPE_IO)
   2086 				break;
   2087 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2088 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2089 				i += 4;	/* skip high bits, too */
   2090 		}
   2091 		if (i < PCI_MAPREG_END) {
   2092 			/*
    2093 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2094 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2095 			 * That's not a problem, because the newer chips
    2096 			 * don't have this bug.
    2097 			 *
    2098 			 * The i8254x apparently doesn't respond when the
    2099 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2100 			 * been configured.
   2101 			 */
   2102 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2103 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2104 				aprint_error_dev(sc->sc_dev,
   2105 				    "WARNING: I/O BAR at zero.\n");
   2106 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2107 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2108 			    == 0) {
   2109 				sc->sc_flags |= WM_F_IOH_VALID;
   2110 			} else
   2111 				aprint_error_dev(sc->sc_dev,
   2112 				    "WARNING: unable to map I/O space\n");
   2113 		}
   2114 		break;
   2115 	default:
   2116 		break;
   2117 	}
   2118 
   2119 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2120 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2121 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2122 	if (sc->sc_type < WM_T_82542_2_1)
   2123 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2124 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2125 
   2126 	/* Power up chip */
   2127 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2128 	    && error != EOPNOTSUPP) {
   2129 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2130 		return;
   2131 	}
   2132 
   2133 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2134 	/*
    2135 	 * Don't use MSI-X if we can use only one queue, to save
    2136 	 * interrupt resources.
   2137 	 */
   2138 	if (sc->sc_nqueues > 1) {
   2139 		max_type = PCI_INTR_TYPE_MSIX;
   2140 		/*
    2141 		 * The 82583 has an MSI-X capability in the PCI
    2142 		 * configuration space, but it doesn't support it. At least
    2143 		 * the documentation doesn't say anything about MSI-X.
   2144 		 */
   2145 		counts[PCI_INTR_TYPE_MSIX]
   2146 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2147 	} else {
   2148 		max_type = PCI_INTR_TYPE_MSI;
   2149 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2150 	}
   2151 
   2152 	/* Allocation settings */
   2153 	counts[PCI_INTR_TYPE_MSI] = 1;
   2154 	counts[PCI_INTR_TYPE_INTX] = 1;
   2155 	/* overridden by disable flags */
   2156 	if (wm_disable_msi != 0) {
   2157 		counts[PCI_INTR_TYPE_MSI] = 0;
   2158 		if (wm_disable_msix != 0) {
   2159 			max_type = PCI_INTR_TYPE_INTX;
   2160 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2161 		}
   2162 	} else if (wm_disable_msix != 0) {
   2163 		max_type = PCI_INTR_TYPE_MSI;
   2164 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2165 	}
   2166 
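	/*
	 * The allocation below falls back in steps: if MSI-X setup
	 * fails, release it and retry with MSI; if MSI setup fails,
	 * retry with INTx; if INTx setup also fails, give up.
	 */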
   2167 alloc_retry:
   2168 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2169 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2170 		return;
   2171 	}
   2172 
   2173 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2174 		error = wm_setup_msix(sc);
   2175 		if (error) {
   2176 			pci_intr_release(pc, sc->sc_intrs,
   2177 			    counts[PCI_INTR_TYPE_MSIX]);
   2178 
   2179 			/* Setup for MSI: Disable MSI-X */
   2180 			max_type = PCI_INTR_TYPE_MSI;
   2181 			counts[PCI_INTR_TYPE_MSI] = 1;
   2182 			counts[PCI_INTR_TYPE_INTX] = 1;
   2183 			goto alloc_retry;
   2184 		}
   2185 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2186 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2187 		error = wm_setup_legacy(sc);
   2188 		if (error) {
   2189 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2190 			    counts[PCI_INTR_TYPE_MSI]);
   2191 
   2192 			/* The next try is for INTx: Disable MSI */
   2193 			max_type = PCI_INTR_TYPE_INTX;
   2194 			counts[PCI_INTR_TYPE_INTX] = 1;
   2195 			goto alloc_retry;
   2196 		}
   2197 	} else {
   2198 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2199 		error = wm_setup_legacy(sc);
   2200 		if (error) {
   2201 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2202 			    counts[PCI_INTR_TYPE_INTX]);
   2203 			return;
   2204 		}
   2205 	}
   2206 
   2207 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2208 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2209 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2210 	    WQ_PERCPU | WQ_MPSAFE);
   2211 	if (error) {
   2212 		aprint_error_dev(sc->sc_dev,
   2213 		    "unable to create TxRx workqueue\n");
   2214 		goto out;
   2215 	}
   2216 
   2217 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2218 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2219 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2220 	    WQ_MPSAFE);
   2221 	if (error) {
   2222 		workqueue_destroy(sc->sc_queue_wq);
   2223 		aprint_error_dev(sc->sc_dev,
   2224 		    "unable to create reset workqueue\n");
   2225 		goto out;
   2226 	}
   2227 
   2228 	/*
   2229 	 * Check the function ID (unit number of the chip).
   2230 	 */
   2231 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2232 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2233 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2234 	    || (sc->sc_type == WM_T_82580)
   2235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2236 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2237 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2238 	else
   2239 		sc->sc_funcid = 0;
   2240 
   2241 	/*
   2242 	 * Determine a few things about the bus we're connected to.
   2243 	 */
   2244 	if (sc->sc_type < WM_T_82543) {
   2245 		/* We don't really know the bus characteristics here. */
   2246 		sc->sc_bus_speed = 33;
   2247 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2248 		/*
   2249 		 * CSA (Communication Streaming Architecture) is about as fast
    2250 		 * as a 32-bit 66MHz PCI bus.
   2251 		 */
   2252 		sc->sc_flags |= WM_F_CSA;
   2253 		sc->sc_bus_speed = 66;
   2254 		aprint_verbose_dev(sc->sc_dev,
   2255 		    "Communication Streaming Architecture\n");
   2256 		if (sc->sc_type == WM_T_82547) {
   2257 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2258 			callout_setfunc(&sc->sc_txfifo_ch,
   2259 			    wm_82547_txfifo_stall, sc);
   2260 			aprint_verbose_dev(sc->sc_dev,
   2261 			    "using 82547 Tx FIFO stall work-around\n");
   2262 		}
   2263 	} else if (sc->sc_type >= WM_T_82571) {
   2264 		sc->sc_flags |= WM_F_PCIE;
   2265 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2266 		    && (sc->sc_type != WM_T_ICH10)
   2267 		    && (sc->sc_type != WM_T_PCH)
   2268 		    && (sc->sc_type != WM_T_PCH2)
   2269 		    && (sc->sc_type != WM_T_PCH_LPT)
   2270 		    && (sc->sc_type != WM_T_PCH_SPT)
   2271 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2272 			/* ICH* and PCH* have no PCIe capability registers */
   2273 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2274 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2275 				NULL) == 0)
   2276 				aprint_error_dev(sc->sc_dev,
   2277 				    "unable to find PCIe capability\n");
   2278 		}
   2279 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2280 	} else {
   2281 		reg = CSR_READ(sc, WMREG_STATUS);
   2282 		if (reg & STATUS_BUS64)
   2283 			sc->sc_flags |= WM_F_BUS64;
   2284 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2285 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2286 
   2287 			sc->sc_flags |= WM_F_PCIX;
   2288 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2289 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2290 				aprint_error_dev(sc->sc_dev,
   2291 				    "unable to find PCIX capability\n");
   2292 			else if (sc->sc_type != WM_T_82545_3 &&
   2293 			    sc->sc_type != WM_T_82546_3) {
   2294 				/*
   2295 				 * Work around a problem caused by the BIOS
   2296 				 * setting the max memory read byte count
   2297 				 * incorrectly.
   2298 				 */
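				/*
				 * E.g. if the BIOS programmed an MMRBC
				 * of 4096 bytes (bytecnt 3) while the
				 * device only allows 2048 (maxb 2), we
				 * rewrite the command register to 2048.
				 */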
   2299 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2300 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2301 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2302 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2303 
   2304 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2305 				    PCIX_CMD_BYTECNT_SHIFT;
   2306 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2307 				    PCIX_STATUS_MAXB_SHIFT;
   2308 				if (bytecnt > maxb) {
   2309 					aprint_verbose_dev(sc->sc_dev,
   2310 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2311 					    512 << bytecnt, 512 << maxb);
   2312 					pcix_cmd = (pcix_cmd &
   2313 					    ~PCIX_CMD_BYTECNT_MASK) |
   2314 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2315 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2316 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2317 					    pcix_cmd);
   2318 				}
   2319 			}
   2320 		}
   2321 		/*
   2322 		 * The quad port adapter is special; it has a PCIX-PCIX
   2323 		 * bridge on the board, and can run the secondary bus at
   2324 		 * a higher speed.
   2325 		 */
   2326 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2327 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2328 								      : 66;
   2329 		} else if (sc->sc_flags & WM_F_PCIX) {
   2330 			switch (reg & STATUS_PCIXSPD_MASK) {
   2331 			case STATUS_PCIXSPD_50_66:
   2332 				sc->sc_bus_speed = 66;
   2333 				break;
   2334 			case STATUS_PCIXSPD_66_100:
   2335 				sc->sc_bus_speed = 100;
   2336 				break;
   2337 			case STATUS_PCIXSPD_100_133:
   2338 				sc->sc_bus_speed = 133;
   2339 				break;
   2340 			default:
   2341 				aprint_error_dev(sc->sc_dev,
   2342 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2343 				    reg & STATUS_PCIXSPD_MASK);
   2344 				sc->sc_bus_speed = 66;
   2345 				break;
   2346 			}
   2347 		} else
   2348 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2349 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2350 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2351 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2352 	}
   2353 
   2354 	/* clear interesting stat counters */
   2355 	CSR_READ(sc, WMREG_COLC);
   2356 	CSR_READ(sc, WMREG_RXERRC);
   2357 
   2358 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2359 	    || (sc->sc_type >= WM_T_ICH8))
   2360 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2361 	if (sc->sc_type >= WM_T_ICH8)
   2362 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2363 
   2364 	/* Set PHY, NVM mutex related stuff */
   2365 	switch (sc->sc_type) {
   2366 	case WM_T_82542_2_0:
   2367 	case WM_T_82542_2_1:
   2368 	case WM_T_82543:
   2369 	case WM_T_82544:
   2370 		/* Microwire */
   2371 		sc->nvm.read = wm_nvm_read_uwire;
   2372 		sc->sc_nvm_wordsize = 64;
   2373 		sc->sc_nvm_addrbits = 6;
   2374 		break;
   2375 	case WM_T_82540:
   2376 	case WM_T_82545:
   2377 	case WM_T_82545_3:
   2378 	case WM_T_82546:
   2379 	case WM_T_82546_3:
   2380 		/* Microwire */
   2381 		sc->nvm.read = wm_nvm_read_uwire;
   2382 		reg = CSR_READ(sc, WMREG_EECD);
   2383 		if (reg & EECD_EE_SIZE) {
   2384 			sc->sc_nvm_wordsize = 256;
   2385 			sc->sc_nvm_addrbits = 8;
   2386 		} else {
   2387 			sc->sc_nvm_wordsize = 64;
   2388 			sc->sc_nvm_addrbits = 6;
   2389 		}
   2390 		sc->sc_flags |= WM_F_LOCK_EECD;
   2391 		sc->nvm.acquire = wm_get_eecd;
   2392 		sc->nvm.release = wm_put_eecd;
   2393 		break;
   2394 	case WM_T_82541:
   2395 	case WM_T_82541_2:
   2396 	case WM_T_82547:
   2397 	case WM_T_82547_2:
   2398 		reg = CSR_READ(sc, WMREG_EECD);
   2399 		/*
    2400 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2401 		 * the 8254[17], so set flags and functions before calling it.
   2402 		 */
   2403 		sc->sc_flags |= WM_F_LOCK_EECD;
   2404 		sc->nvm.acquire = wm_get_eecd;
   2405 		sc->nvm.release = wm_put_eecd;
   2406 		if (reg & EECD_EE_TYPE) {
   2407 			/* SPI */
   2408 			sc->nvm.read = wm_nvm_read_spi;
   2409 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2410 			wm_nvm_set_addrbits_size_eecd(sc);
   2411 		} else {
   2412 			/* Microwire */
   2413 			sc->nvm.read = wm_nvm_read_uwire;
   2414 			if ((reg & EECD_EE_ABITS) != 0) {
   2415 				sc->sc_nvm_wordsize = 256;
   2416 				sc->sc_nvm_addrbits = 8;
   2417 			} else {
   2418 				sc->sc_nvm_wordsize = 64;
   2419 				sc->sc_nvm_addrbits = 6;
   2420 			}
   2421 		}
   2422 		break;
   2423 	case WM_T_82571:
   2424 	case WM_T_82572:
   2425 		/* SPI */
   2426 		sc->nvm.read = wm_nvm_read_eerd;
    2427 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2428 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2429 		wm_nvm_set_addrbits_size_eecd(sc);
   2430 		sc->phy.acquire = wm_get_swsm_semaphore;
   2431 		sc->phy.release = wm_put_swsm_semaphore;
   2432 		sc->nvm.acquire = wm_get_nvm_82571;
   2433 		sc->nvm.release = wm_put_nvm_82571;
   2434 		break;
   2435 	case WM_T_82573:
   2436 	case WM_T_82574:
   2437 	case WM_T_82583:
   2438 		sc->nvm.read = wm_nvm_read_eerd;
    2439 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2440 		if (sc->sc_type == WM_T_82573) {
   2441 			sc->phy.acquire = wm_get_swsm_semaphore;
   2442 			sc->phy.release = wm_put_swsm_semaphore;
   2443 			sc->nvm.acquire = wm_get_nvm_82571;
   2444 			sc->nvm.release = wm_put_nvm_82571;
   2445 		} else {
   2446 			/* Both PHY and NVM use the same semaphore. */
   2447 			sc->phy.acquire = sc->nvm.acquire
   2448 			    = wm_get_swfwhw_semaphore;
   2449 			sc->phy.release = sc->nvm.release
   2450 			    = wm_put_swfwhw_semaphore;
   2451 		}
   2452 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2453 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2454 			sc->sc_nvm_wordsize = 2048;
   2455 		} else {
   2456 			/* SPI */
   2457 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2458 			wm_nvm_set_addrbits_size_eecd(sc);
   2459 		}
   2460 		break;
   2461 	case WM_T_82575:
   2462 	case WM_T_82576:
   2463 	case WM_T_82580:
   2464 	case WM_T_I350:
   2465 	case WM_T_I354:
   2466 	case WM_T_80003:
   2467 		/* SPI */
   2468 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2469 		wm_nvm_set_addrbits_size_eecd(sc);
   2470 		if ((sc->sc_type == WM_T_80003)
   2471 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 		} else {
   2475 			sc->nvm.read = wm_nvm_read_spi;
   2476 			sc->sc_flags |= WM_F_LOCK_EECD;
   2477 		}
   2478 		sc->phy.acquire = wm_get_phy_82575;
   2479 		sc->phy.release = wm_put_phy_82575;
   2480 		sc->nvm.acquire = wm_get_nvm_80003;
   2481 		sc->nvm.release = wm_put_nvm_80003;
   2482 		break;
   2483 	case WM_T_ICH8:
   2484 	case WM_T_ICH9:
   2485 	case WM_T_ICH10:
   2486 	case WM_T_PCH:
   2487 	case WM_T_PCH2:
   2488 	case WM_T_PCH_LPT:
   2489 		sc->nvm.read = wm_nvm_read_ich8;
   2490 		/* FLASH */
   2491 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2492 		sc->sc_nvm_wordsize = 2048;
   2493 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2494 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2495 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2496 			aprint_error_dev(sc->sc_dev,
   2497 			    "can't map FLASH registers\n");
   2498 			goto out;
   2499 		}
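		/*
		 * GFPREG describes the gigabit flash region in sectors:
		 * its low half holds the base sector and its high half
		 * the limit.  Below, both are scaled to bytes by the
		 * sector size, and the per-bank size is then expressed
		 * in 16-bit words (the region holds two banks).
		 */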
   2500 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2501 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2502 		    ICH_FLASH_SECTOR_SIZE;
   2503 		sc->sc_ich8_flash_bank_size =
   2504 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2505 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2506 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2507 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2508 		sc->sc_flashreg_offset = 0;
   2509 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2510 		sc->phy.release = wm_put_swflag_ich8lan;
   2511 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2512 		sc->nvm.release = wm_put_nvm_ich8lan;
   2513 		break;
   2514 	case WM_T_PCH_SPT:
   2515 	case WM_T_PCH_CNP:
   2516 		sc->nvm.read = wm_nvm_read_spt;
   2517 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2518 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2519 		sc->sc_flasht = sc->sc_st;
   2520 		sc->sc_flashh = sc->sc_sh;
   2521 		sc->sc_ich8_flash_base = 0;
   2522 		sc->sc_nvm_wordsize =
   2523 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2524 		    * NVM_SIZE_MULTIPLIER;
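		/*
		 * E.g. a strap field of 0x1F yields a flash region of
		 * 32 * NVM_SIZE_MULTIPLIER bytes.
		 */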
    2525 		/* It is the size in bytes; we want words */
   2526 		sc->sc_nvm_wordsize /= 2;
   2527 		/* Assume 2 banks */
   2528 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2529 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2530 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2531 		sc->phy.release = wm_put_swflag_ich8lan;
   2532 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2533 		sc->nvm.release = wm_put_nvm_ich8lan;
   2534 		break;
   2535 	case WM_T_I210:
   2536 	case WM_T_I211:
    2537 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2538 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2539 		if (wm_nvm_flash_presence_i210(sc)) {
   2540 			sc->nvm.read = wm_nvm_read_eerd;
   2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2542 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2543 			wm_nvm_set_addrbits_size_eecd(sc);
   2544 		} else {
   2545 			sc->nvm.read = wm_nvm_read_invm;
   2546 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2547 			sc->sc_nvm_wordsize = INVM_SIZE;
   2548 		}
   2549 		sc->phy.acquire = wm_get_phy_82575;
   2550 		sc->phy.release = wm_put_phy_82575;
   2551 		sc->nvm.acquire = wm_get_nvm_80003;
   2552 		sc->nvm.release = wm_put_nvm_80003;
   2553 		break;
   2554 	default:
   2555 		break;
   2556 	}
   2557 
   2558 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
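	/*
	 * On the 82571/82572, the SWSM2 LOCK bit records whether some
	 * other function has already been through this path; only the
	 * first function to set LOCK goes on to clear SMBI.
	 */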
   2559 	switch (sc->sc_type) {
   2560 	case WM_T_82571:
   2561 	case WM_T_82572:
   2562 		reg = CSR_READ(sc, WMREG_SWSM2);
   2563 		if ((reg & SWSM2_LOCK) == 0) {
   2564 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2565 			force_clear_smbi = true;
   2566 		} else
   2567 			force_clear_smbi = false;
   2568 		break;
   2569 	case WM_T_82573:
   2570 	case WM_T_82574:
   2571 	case WM_T_82583:
   2572 		force_clear_smbi = true;
   2573 		break;
   2574 	default:
   2575 		force_clear_smbi = false;
   2576 		break;
   2577 	}
   2578 	if (force_clear_smbi) {
   2579 		reg = CSR_READ(sc, WMREG_SWSM);
   2580 		if ((reg & SWSM_SMBI) != 0)
   2581 			aprint_error_dev(sc->sc_dev,
   2582 			    "Please update the Bootagent\n");
   2583 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2584 	}
   2585 
   2586 	/*
    2587 	 * Defer printing the EEPROM type until after verifying the checksum.
   2588 	 * This allows the EEPROM type to be printed correctly in the case
   2589 	 * that no EEPROM is attached.
   2590 	 */
   2591 	/*
   2592 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2593 	 * this for later, so we can fail future reads from the EEPROM.
   2594 	 */
   2595 	if (wm_nvm_validate_checksum(sc)) {
   2596 		/*
    2597 		 * Validate again, because some PCI-e parts fail the
    2598 		 * first check due to the link being in a sleep state.
   2599 		 */
   2600 		if (wm_nvm_validate_checksum(sc))
   2601 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2602 	}
   2603 
   2604 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2605 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2606 	else {
   2607 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2608 		    sc->sc_nvm_wordsize);
   2609 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2610 			aprint_verbose("iNVM");
   2611 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2612 			aprint_verbose("FLASH(HW)");
   2613 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2614 			aprint_verbose("FLASH");
   2615 		else {
   2616 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2617 				eetype = "SPI";
   2618 			else
   2619 				eetype = "MicroWire";
   2620 			aprint_verbose("(%d address bits) %s EEPROM",
   2621 			    sc->sc_nvm_addrbits, eetype);
   2622 		}
   2623 	}
   2624 	wm_nvm_version(sc);
   2625 	aprint_verbose("\n");
   2626 
   2627 	/*
   2628 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2629 	 * incorrect.
   2630 	 */
   2631 	wm_gmii_setup_phytype(sc, 0, 0);
   2632 
   2633 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2634 	switch (sc->sc_type) {
   2635 	case WM_T_ICH8:
   2636 	case WM_T_ICH9:
   2637 	case WM_T_ICH10:
   2638 	case WM_T_PCH:
   2639 	case WM_T_PCH2:
   2640 	case WM_T_PCH_LPT:
   2641 	case WM_T_PCH_SPT:
   2642 	case WM_T_PCH_CNP:
   2643 		apme_mask = WUC_APME;
   2644 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2645 		if ((eeprom_data & apme_mask) != 0)
   2646 			sc->sc_flags |= WM_F_WOL;
   2647 		break;
   2648 	default:
   2649 		break;
   2650 	}
   2651 
   2652 	/* Reset the chip to a known state. */
   2653 	wm_reset(sc);
   2654 
   2655 	/*
   2656 	 * Check for I21[01] PLL workaround.
   2657 	 *
   2658 	 * Three cases:
   2659 	 * a) Chip is I211.
   2660 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2661 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2662 	 */
   2663 	if (sc->sc_type == WM_T_I211)
   2664 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2665 	if (sc->sc_type == WM_T_I210) {
   2666 		if (!wm_nvm_flash_presence_i210(sc))
   2667 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2668 		else if ((sc->sc_nvm_ver_major < 3)
   2669 		    || ((sc->sc_nvm_ver_major == 3)
   2670 			&& (sc->sc_nvm_ver_minor < 25))) {
   2671 			aprint_verbose_dev(sc->sc_dev,
   2672 			    "ROM image version %d.%d is older than 3.25\n",
   2673 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2674 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2675 		}
   2676 	}
   2677 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2678 		wm_pll_workaround_i210(sc);
   2679 
   2680 	wm_get_wakeup(sc);
   2681 
   2682 	/* Non-AMT based hardware can now take control from firmware */
   2683 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2684 		wm_get_hw_control(sc);
   2685 
   2686 	/*
    2687 	 * Read the Ethernet address from the EEPROM unless it was already
    2688 	 * provided via device properties.
   2689 	 */
   2690 	ea = prop_dictionary_get(dict, "mac-address");
   2691 	if (ea != NULL) {
   2692 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2693 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2694 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2695 	} else {
   2696 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2697 			aprint_error_dev(sc->sc_dev,
   2698 			    "unable to read Ethernet address\n");
   2699 			goto out;
   2700 		}
   2701 	}
   2702 
   2703 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2704 	    ether_sprintf(enaddr));
   2705 
   2706 	/*
   2707 	 * Read the config info from the EEPROM, and set up various
   2708 	 * bits in the control registers based on their contents.
   2709 	 */
   2710 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2711 	if (pn != NULL) {
   2712 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2713 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2714 	} else {
   2715 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2716 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2717 			goto out;
   2718 		}
   2719 	}
   2720 
   2721 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2722 	if (pn != NULL) {
   2723 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2724 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2725 	} else {
   2726 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2727 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2728 			goto out;
   2729 		}
   2730 	}
   2731 
   2732 	/* check for WM_F_WOL */
   2733 	switch (sc->sc_type) {
   2734 	case WM_T_82542_2_0:
   2735 	case WM_T_82542_2_1:
   2736 	case WM_T_82543:
   2737 		/* dummy? */
   2738 		eeprom_data = 0;
   2739 		apme_mask = NVM_CFG3_APME;
   2740 		break;
   2741 	case WM_T_82544:
   2742 		apme_mask = NVM_CFG2_82544_APM_EN;
   2743 		eeprom_data = cfg2;
   2744 		break;
   2745 	case WM_T_82546:
   2746 	case WM_T_82546_3:
   2747 	case WM_T_82571:
   2748 	case WM_T_82572:
   2749 	case WM_T_82573:
   2750 	case WM_T_82574:
   2751 	case WM_T_82583:
   2752 	case WM_T_80003:
   2753 	case WM_T_82575:
   2754 	case WM_T_82576:
   2755 		apme_mask = NVM_CFG3_APME;
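         		/* The APME bit is in the per-port CFG3 NVM word. */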
   2756 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2757 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2758 		break;
   2759 	case WM_T_82580:
   2760 	case WM_T_I350:
   2761 	case WM_T_I354:
   2762 	case WM_T_I210:
   2763 	case WM_T_I211:
   2764 		apme_mask = NVM_CFG3_APME;
   2765 		wm_nvm_read(sc,
   2766 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2767 		    1, &eeprom_data);
   2768 		break;
   2769 	case WM_T_ICH8:
   2770 	case WM_T_ICH9:
   2771 	case WM_T_ICH10:
   2772 	case WM_T_PCH:
   2773 	case WM_T_PCH2:
   2774 	case WM_T_PCH_LPT:
   2775 	case WM_T_PCH_SPT:
   2776 	case WM_T_PCH_CNP:
    2777 		/* Already checked before wm_reset() */
   2778 		apme_mask = eeprom_data = 0;
   2779 		break;
   2780 	default: /* XXX 82540 */
   2781 		apme_mask = NVM_CFG3_APME;
   2782 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2783 		break;
   2784 	}
   2785 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2786 	if ((eeprom_data & apme_mask) != 0)
   2787 		sc->sc_flags |= WM_F_WOL;
   2788 
   2789 	/*
    2790 	 * We have the EEPROM settings; now apply the special cases
    2791 	 * where the EEPROM may be wrong or the board doesn't support
    2792 	 * wake-on-LAN on a particular port.
   2793 	 */
   2794 	switch (sc->sc_pcidevid) {
   2795 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2796 		sc->sc_flags &= ~WM_F_WOL;
   2797 		break;
   2798 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2799 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2800 		/* Wake events only supported on port A for dual fiber
   2801 		 * regardless of eeprom setting */
   2802 		if (sc->sc_funcid == 1)
   2803 			sc->sc_flags &= ~WM_F_WOL;
   2804 		break;
   2805 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2806 		/* If quad port adapter, disable WoL on all but port A */
   2807 		if (sc->sc_funcid != 0)
   2808 			sc->sc_flags &= ~WM_F_WOL;
   2809 		break;
   2810 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2811 		/* Wake events only supported on port A for dual fiber
   2812 		 * regardless of eeprom setting */
   2813 		if (sc->sc_funcid == 1)
   2814 			sc->sc_flags &= ~WM_F_WOL;
   2815 		break;
   2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2817 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2818 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2819 		/* If quad port adapter, disable WoL on all but port A */
   2820 		if (sc->sc_funcid != 0)
   2821 			sc->sc_flags &= ~WM_F_WOL;
   2822 		break;
   2823 	}
   2824 
   2825 	if (sc->sc_type >= WM_T_82575) {
   2826 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2827 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2828 			    nvmword);
   2829 			if ((sc->sc_type == WM_T_82575) ||
   2830 			    (sc->sc_type == WM_T_82576)) {
   2831 				/* Check NVM for autonegotiation */
   2832 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2833 				    != 0)
   2834 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2835 			}
   2836 			if ((sc->sc_type == WM_T_82575) ||
   2837 			    (sc->sc_type == WM_T_I350)) {
   2838 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2839 					sc->sc_flags |= WM_F_MAS;
   2840 			}
   2841 		}
   2842 	}
   2843 
   2844 	/*
    2845 	 * XXX Some multi-port cards need special handling to disable
    2846 	 * a particular port.
   2847 	 */
   2848 
   2849 	if (sc->sc_type >= WM_T_82544) {
   2850 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2851 		if (pn != NULL) {
   2852 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2853 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2854 		} else {
   2855 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2856 				aprint_error_dev(sc->sc_dev,
   2857 				    "unable to read SWDPIN\n");
   2858 				goto out;
   2859 			}
   2860 		}
   2861 	}
   2862 
   2863 	if (cfg1 & NVM_CFG1_ILOS)
   2864 		sc->sc_ctrl |= CTRL_ILOS;
   2865 
   2866 	/*
   2867 	 * XXX
    2868 	 * This code isn't correct because pins 2 and 3 are located at
    2869 	 * different positions on newer chips. Check all the datasheets.
    2870 	 *
    2871 	 * Until this is resolved, apply it only to chips up to the 82580.
   2872 	 */
   2873 	if (sc->sc_type <= WM_T_82580) {
   2874 		if (sc->sc_type >= WM_T_82544) {
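         			/*
         			 * Map the NVM software-definable pin direction
         			 * (SWDPIO) and value (SWDPIN) fields into CTRL.
         			 */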
   2875 			sc->sc_ctrl |=
   2876 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2877 			    CTRL_SWDPIO_SHIFT;
   2878 			sc->sc_ctrl |=
   2879 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2880 			    CTRL_SWDPINS_SHIFT;
   2881 		} else {
   2882 			sc->sc_ctrl |=
   2883 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2884 			    CTRL_SWDPIO_SHIFT;
   2885 		}
   2886 	}
   2887 
   2888 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2889 		wm_nvm_read(sc,
   2890 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2891 		    1, &nvmword);
   2892 		if (nvmword & NVM_CFG3_ILOS)
   2893 			sc->sc_ctrl |= CTRL_ILOS;
   2894 	}
   2895 
   2896 #if 0
   2897 	if (sc->sc_type >= WM_T_82544) {
   2898 		if (cfg1 & NVM_CFG1_IPS0)
   2899 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2900 		if (cfg1 & NVM_CFG1_IPS1)
   2901 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2902 		sc->sc_ctrl_ext |=
   2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2904 		    CTRL_EXT_SWDPIO_SHIFT;
   2905 		sc->sc_ctrl_ext |=
   2906 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2907 		    CTRL_EXT_SWDPINS_SHIFT;
   2908 	} else {
   2909 		sc->sc_ctrl_ext |=
   2910 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2911 		    CTRL_EXT_SWDPIO_SHIFT;
   2912 	}
   2913 #endif
   2914 
   2915 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2916 #if 0
   2917 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2918 #endif
   2919 
   2920 	if (sc->sc_type == WM_T_PCH) {
   2921 		uint16_t val;
   2922 
   2923 		/* Save the NVM K1 bit setting */
   2924 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2925 
   2926 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2927 			sc->sc_nvm_k1_enabled = 1;
   2928 		else
   2929 			sc->sc_nvm_k1_enabled = 0;
   2930 	}
   2931 
    2932 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2933 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2934 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2935 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2936 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2937 	    || sc->sc_type == WM_T_82573
   2938 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2939 		/* Copper only */
    2940 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2941 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2942 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2943 	    || (sc->sc_type == WM_T_I211)) {
   2944 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2945 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2946 		switch (link_mode) {
   2947 		case CTRL_EXT_LINK_MODE_1000KX:
   2948 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2949 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2950 			break;
   2951 		case CTRL_EXT_LINK_MODE_SGMII:
   2952 			if (wm_sgmii_uses_mdio(sc)) {
   2953 				aprint_normal_dev(sc->sc_dev,
   2954 				    "SGMII(MDIO)\n");
   2955 				sc->sc_flags |= WM_F_SGMII;
   2956 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 				break;
   2958 			}
   2959 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2960 			/*FALLTHROUGH*/
   2961 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2962 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2963 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2964 				if (link_mode
   2965 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2966 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2967 					sc->sc_flags |= WM_F_SGMII;
   2968 					aprint_verbose_dev(sc->sc_dev,
   2969 					    "SGMII\n");
   2970 				} else {
   2971 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2972 					aprint_verbose_dev(sc->sc_dev,
   2973 					    "SERDES\n");
   2974 				}
   2975 				break;
   2976 			}
   2977 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2978 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2979 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2980 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2981 				sc->sc_flags |= WM_F_SGMII;
   2982 			}
   2983 			/* Do not change link mode for 100BaseFX */
   2984 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2985 				break;
   2986 
   2987 			/* Change current link mode setting */
   2988 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2989 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2990 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2991 			else
   2992 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2994 			break;
   2995 		case CTRL_EXT_LINK_MODE_GMII:
   2996 		default:
   2997 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2998 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2999 			break;
   3000 		}
   3001 
    3002 		/* Enable the I2C interface only when using SGMII. */
    3003 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3004 			reg |= CTRL_EXT_I2C_ENA;
    3005 		else
    3006 			reg &= ~CTRL_EXT_I2C_ENA;
   3007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3008 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3009 			if (!wm_sgmii_uses_mdio(sc))
   3010 				wm_gmii_setup_phytype(sc, 0, 0);
   3011 			wm_reset_mdicnfg_82580(sc);
   3012 		}
   3013 	} else if (sc->sc_type < WM_T_82543 ||
   3014 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3015 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3016 			aprint_error_dev(sc->sc_dev,
   3017 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3018 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3019 		}
   3020 	} else {
   3021 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3022 			aprint_error_dev(sc->sc_dev,
   3023 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3024 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3025 		}
   3026 	}
   3027 
   3028 	if (sc->sc_type >= WM_T_PCH2)
   3029 		sc->sc_flags |= WM_F_EEE;
   3030 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3031 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3032 		/* XXX: Need special handling for I354. (not yet) */
   3033 		if (sc->sc_type != WM_T_I354)
   3034 			sc->sc_flags |= WM_F_EEE;
   3035 	}
   3036 
   3037 	/*
    3038 	 * The I350 has a bug where it always strips the CRC whether asked
    3039 	 * to or not, so ask for a stripped CRC here and cope in rxeof.
   3040 	 */
   3041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3042 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3043 		sc->sc_flags |= WM_F_CRC_STRIP;
   3044 
   3045 	/* Set device properties (macflags) */
   3046 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3047 
   3048 	if (sc->sc_flags != 0) {
   3049 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3050 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3051 	}
   3052 
   3053 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3054 
   3055 	/* Initialize the media structures accordingly. */
   3056 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3057 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3058 	else
   3059 		wm_tbi_mediainit(sc); /* All others */
   3060 
   3061 	ifp = &sc->sc_ethercom.ec_if;
   3062 	xname = device_xname(sc->sc_dev);
   3063 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3064 	ifp->if_softc = sc;
   3065 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3066 	ifp->if_extflags = IFEF_MPSAFE;
   3067 	ifp->if_ioctl = wm_ioctl;
   3068 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3069 		ifp->if_start = wm_nq_start;
    3070 		/*
    3071 		 * When the number of CPUs is one and the controller can use
    3072 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3073 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3074 		 * other for link status changes.
    3075 		 * In this situation, wm_nq_transmit() is disadvantageous
    3076 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    3077 		 */
   3078 		if (wm_is_using_multiqueue(sc))
   3079 			ifp->if_transmit = wm_nq_transmit;
   3080 	} else {
   3081 		ifp->if_start = wm_start;
   3082 		/*
   3083 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3084 		 * described above.
   3085 		 */
   3086 		if (wm_is_using_multiqueue(sc))
   3087 			ifp->if_transmit = wm_transmit;
   3088 	}
    3089 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3090 	ifp->if_init = wm_init;
   3091 	ifp->if_stop = wm_stop;
   3092 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3093 	IFQ_SET_READY(&ifp->if_snd);
   3094 
   3095 	/* Check for jumbo frame */
   3096 	switch (sc->sc_type) {
   3097 	case WM_T_82573:
   3098 		/* XXX limited to 9234 if ASPM is disabled */
   3099 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3100 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3101 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3102 		break;
   3103 	case WM_T_82571:
   3104 	case WM_T_82572:
   3105 	case WM_T_82574:
   3106 	case WM_T_82583:
   3107 	case WM_T_82575:
   3108 	case WM_T_82576:
   3109 	case WM_T_82580:
   3110 	case WM_T_I350:
   3111 	case WM_T_I354:
   3112 	case WM_T_I210:
   3113 	case WM_T_I211:
   3114 	case WM_T_80003:
   3115 	case WM_T_ICH9:
   3116 	case WM_T_ICH10:
   3117 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3118 	case WM_T_PCH_LPT:
   3119 	case WM_T_PCH_SPT:
   3120 	case WM_T_PCH_CNP:
   3121 		/* XXX limited to 9234 */
   3122 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3123 		break;
   3124 	case WM_T_PCH:
   3125 		/* XXX limited to 4096 */
   3126 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3127 		break;
   3128 	case WM_T_82542_2_0:
   3129 	case WM_T_82542_2_1:
   3130 	case WM_T_ICH8:
   3131 		/* No support for jumbo frame */
   3132 		break;
   3133 	default:
   3134 		/* ETHER_MAX_LEN_JUMBO */
   3135 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3136 		break;
   3137 	}
   3138 
    3139 	/* If we're an i82543 or greater, we can support VLANs. */
   3140 	if (sc->sc_type >= WM_T_82543) {
   3141 		sc->sc_ethercom.ec_capabilities |=
   3142 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3143 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3144 	}
   3145 
   3146 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3147 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3148 
   3149 	/*
   3150 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3151 	 * on i82543 and later.
   3152 	 */
   3153 	if (sc->sc_type >= WM_T_82543) {
   3154 		ifp->if_capabilities |=
   3155 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3156 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3157 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3158 		    IFCAP_CSUM_TCPv6_Tx |
   3159 		    IFCAP_CSUM_UDPv6_Tx;
   3160 	}
   3161 
   3162 	/*
   3163 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3164 	 *
   3165 	 *	82541GI (8086:1076) ... no
   3166 	 *	82572EI (8086:10b9) ... yes
   3167 	 */
   3168 	if (sc->sc_type >= WM_T_82571) {
   3169 		ifp->if_capabilities |=
   3170 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3171 	}
   3172 
   3173 	/*
    3174 	 * If we're an i82544 or greater (except i82547), we can do
   3175 	 * TCP segmentation offload.
   3176 	 */
   3177 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3178 		ifp->if_capabilities |= IFCAP_TSOv4;
   3179 
   3180 	if (sc->sc_type >= WM_T_82571)
   3181 		ifp->if_capabilities |= IFCAP_TSOv6;
   3182 
   3183 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3184 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3185 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3186 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3187 
   3188 	/* Attach the interface. */
   3189 	if_initialize(ifp);
   3190 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3191 	ether_ifattach(ifp, enaddr);
   3192 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3193 	if_register(ifp);
   3194 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3195 	    RND_FLAG_DEFAULT);
   3196 
   3197 #ifdef WM_EVENT_COUNTERS
   3198 	/* Attach event counters. */
   3199 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3200 	    NULL, xname, "linkintr");
   3201 
   3202 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3203 	    NULL, xname, "CRC Error");
   3204 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3205 	    NULL, xname, "Symbol Error");
   3206 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3207 	    NULL, xname, "Missed Packets");
   3208 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3209 	    NULL, xname, "Collision");
   3210 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3211 	    NULL, xname, "Sequence Error");
   3212 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3213 	    NULL, xname, "Receive Length Error");
   3214 
   3215 	if (sc->sc_type >= WM_T_82543) {
   3216 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3217 		    NULL, xname, "Alignment Error");
   3218 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3219 		    NULL, xname, "Receive Error");
   3220 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3221 		    NULL, xname, "Carrier Extension Error");
   3222 
   3223 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3224 		    NULL, xname, "Tx with No CRS");
   3225 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3226 		    NULL, xname, "TCP Segmentation Context Tx");
   3227 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3228 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3229 	}
   3230 
   3231 	if (sc->sc_type >= WM_T_82542_2_1) {
   3232 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3233 		    NULL, xname, "tx_xoff");
   3234 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3235 		    NULL, xname, "tx_xon");
   3236 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3237 		    NULL, xname, "rx_xoff");
   3238 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3239 		    NULL, xname, "rx_xon");
   3240 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3241 		    NULL, xname, "rx_macctl");
   3242 	}
   3243 
   3244 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Single Collision");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Excessive Collisions");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Multiple Collision");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Late Collisions");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Defer");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Packets Rx (64 bytes)");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3261 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Good Packets Rx");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Broadcast Packets Rx");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Multicast Packets Rx");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Good Packets Tx");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Good Octets Rx");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Good Octets Tx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3279 	    NULL, xname, "Rx No Buffers");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Rx Undersize");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Rx Fragment");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Rx Oversize");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Rx Jabber");
   3288 	if (sc->sc_type >= WM_T_82540) {
   3289 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3290 		    NULL, xname, "Management Packets RX");
   3291 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3292 		    NULL, xname, "Management Packets Dropped");
   3293 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3294 		    NULL, xname, "Management Packets TX");
   3295 	}
   3296 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Total Octets Rx");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Total Octets Tx");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Total Packets Rx");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Total Packets Tx");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Packets Tx (64 bytes)");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3315 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Multicast Packets Tx");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Broadcast Packets Tx Count");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Interrupt Assertion");
   3322 	if (sc->sc_type < WM_T_82575) {
   3323 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3324 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3325 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3326 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3327 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3328 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3329 		evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3330 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3331 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3333 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3334 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3335 		evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3337 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3338 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3339 	}
   3340 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3341 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3342 		    NULL, xname, "BMC2OS Packets received by host");
   3343 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3345 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3346 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3347 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3348 		    NULL, xname, "OS2BMC Packets received by BMC");
   3349 	}
   3350 #endif /* WM_EVENT_COUNTERS */
   3351 
   3352 	sc->sc_txrx_use_workqueue = false;
   3353 
   3354 	if (wm_phy_need_linkdown_discard(sc)) {
   3355 		DPRINTF(sc, WM_DEBUG_LINK,
   3356 		    ("%s: %s: Set linkdown discard flag\n",
   3357 			device_xname(sc->sc_dev), __func__));
   3358 		wm_set_linkdown_discard(sc);
   3359 	}
   3360 
   3361 	wm_init_sysctls(sc);
   3362 
   3363 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3364 		pmf_class_network_register(self, ifp);
   3365 	else
   3366 		aprint_error_dev(self, "couldn't establish power handler\n");
   3367 
   3368 	sc->sc_flags |= WM_F_ATTACHED;
   3369 out:
   3370 	return;
   3371 }
   3372 
   3373 /* The detach function (ca_detach) */
   3374 static int
   3375 wm_detach(device_t self, int flags __unused)
   3376 {
   3377 	struct wm_softc *sc = device_private(self);
   3378 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3379 	int i;
   3380 
   3381 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3382 		return 0;
   3383 
   3384 	/* Stop the interface. Callouts are stopped in it. */
   3385 	IFNET_LOCK(ifp);
   3386 	sc->sc_dying = true;
   3387 	wm_stop(ifp, 1);
   3388 	IFNET_UNLOCK(ifp);
   3389 
   3390 	pmf_device_deregister(self);
   3391 
   3392 	sysctl_teardown(&sc->sc_sysctllog);
   3393 
   3394 #ifdef WM_EVENT_COUNTERS
   3395 	evcnt_detach(&sc->sc_ev_linkintr);
   3396 
   3397 	evcnt_detach(&sc->sc_ev_crcerrs);
   3398 	evcnt_detach(&sc->sc_ev_symerrc);
   3399 	evcnt_detach(&sc->sc_ev_mpc);
   3400 	evcnt_detach(&sc->sc_ev_colc);
   3401 	evcnt_detach(&sc->sc_ev_sec);
   3402 	evcnt_detach(&sc->sc_ev_rlec);
   3403 
   3404 	if (sc->sc_type >= WM_T_82543) {
   3405 		evcnt_detach(&sc->sc_ev_algnerrc);
   3406 		evcnt_detach(&sc->sc_ev_rxerrc);
   3407 		evcnt_detach(&sc->sc_ev_cexterr);
   3408 
   3409 		evcnt_detach(&sc->sc_ev_tncrs);
   3410 		evcnt_detach(&sc->sc_ev_tsctc);
   3411 		evcnt_detach(&sc->sc_ev_tsctfc);
   3412 	}
   3413 
   3414 	if (sc->sc_type >= WM_T_82542_2_1) {
   3415 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3416 		evcnt_detach(&sc->sc_ev_tx_xon);
   3417 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3418 		evcnt_detach(&sc->sc_ev_rx_xon);
   3419 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3420 	}
   3421 
   3422 	evcnt_detach(&sc->sc_ev_scc);
   3423 	evcnt_detach(&sc->sc_ev_ecol);
   3424 	evcnt_detach(&sc->sc_ev_mcc);
   3425 	evcnt_detach(&sc->sc_ev_latecol);
   3426 	evcnt_detach(&sc->sc_ev_dc);
   3427 	evcnt_detach(&sc->sc_ev_prc64);
   3428 	evcnt_detach(&sc->sc_ev_prc127);
   3429 	evcnt_detach(&sc->sc_ev_prc255);
   3430 	evcnt_detach(&sc->sc_ev_prc511);
   3431 	evcnt_detach(&sc->sc_ev_prc1023);
   3432 	evcnt_detach(&sc->sc_ev_prc1522);
   3433 	evcnt_detach(&sc->sc_ev_gprc);
   3434 	evcnt_detach(&sc->sc_ev_bprc);
   3435 	evcnt_detach(&sc->sc_ev_mprc);
   3436 	evcnt_detach(&sc->sc_ev_gptc);
   3437 	evcnt_detach(&sc->sc_ev_gorc);
   3438 	evcnt_detach(&sc->sc_ev_gotc);
   3439 	evcnt_detach(&sc->sc_ev_rnbc);
   3440 	evcnt_detach(&sc->sc_ev_ruc);
   3441 	evcnt_detach(&sc->sc_ev_rfc);
   3442 	evcnt_detach(&sc->sc_ev_roc);
   3443 	evcnt_detach(&sc->sc_ev_rjc);
   3444 	if (sc->sc_type >= WM_T_82540) {
   3445 		evcnt_detach(&sc->sc_ev_mgtprc);
   3446 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3447 		evcnt_detach(&sc->sc_ev_mgtptc);
   3448 	}
   3449 	evcnt_detach(&sc->sc_ev_tor);
   3450 	evcnt_detach(&sc->sc_ev_tot);
   3451 	evcnt_detach(&sc->sc_ev_tpr);
   3452 	evcnt_detach(&sc->sc_ev_tpt);
   3453 	evcnt_detach(&sc->sc_ev_ptc64);
   3454 	evcnt_detach(&sc->sc_ev_ptc127);
   3455 	evcnt_detach(&sc->sc_ev_ptc255);
   3456 	evcnt_detach(&sc->sc_ev_ptc511);
   3457 	evcnt_detach(&sc->sc_ev_ptc1023);
   3458 	evcnt_detach(&sc->sc_ev_ptc1522);
   3459 	evcnt_detach(&sc->sc_ev_mptc);
   3460 	evcnt_detach(&sc->sc_ev_bptc);
   3461 	evcnt_detach(&sc->sc_ev_iac);
   3462 	if (sc->sc_type < WM_T_82575) {
   3463 		evcnt_detach(&sc->sc_ev_icrxptc);
   3464 		evcnt_detach(&sc->sc_ev_icrxatc);
   3465 		evcnt_detach(&sc->sc_ev_ictxptc);
   3466 		evcnt_detach(&sc->sc_ev_ictxact);
   3467 		evcnt_detach(&sc->sc_ev_ictxqec);
   3468 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3469 		evcnt_detach(&sc->sc_ev_icrxdmtc);
   3470 		evcnt_detach(&sc->sc_ev_icrxoc);
   3471 	}
   3472 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3473 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3474 		evcnt_detach(&sc->sc_ev_o2bspc);
   3475 		evcnt_detach(&sc->sc_ev_b2ospc);
   3476 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3477 	}
   3478 #endif /* WM_EVENT_COUNTERS */
   3479 
   3480 	rnd_detach_source(&sc->rnd_source);
   3481 
   3482 	/* Tell the firmware about the release */
   3483 	mutex_enter(sc->sc_core_lock);
   3484 	wm_release_manageability(sc);
   3485 	wm_release_hw_control(sc);
   3486 	wm_enable_wakeup(sc);
   3487 	mutex_exit(sc->sc_core_lock);
   3488 
   3489 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3490 
   3491 	ether_ifdetach(ifp);
   3492 	if_detach(ifp);
   3493 	if_percpuq_destroy(sc->sc_ipq);
   3494 
   3495 	/* Delete all remaining media. */
   3496 	ifmedia_fini(&sc->sc_mii.mii_media);
   3497 
   3498 	/* Unload RX dmamaps and free mbufs */
   3499 	for (i = 0; i < sc->sc_nqueues; i++) {
   3500 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3501 		mutex_enter(rxq->rxq_lock);
   3502 		wm_rxdrain(rxq);
   3503 		mutex_exit(rxq->rxq_lock);
   3504 	}
    3505 	/* The rxq locks must not be held beyond this point. */
   3506 
   3507 	/* Disestablish the interrupt handler */
   3508 	for (i = 0; i < sc->sc_nintrs; i++) {
   3509 		if (sc->sc_ihs[i] != NULL) {
   3510 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3511 			sc->sc_ihs[i] = NULL;
   3512 		}
   3513 	}
   3514 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3515 
   3516 	/* wm_stop() ensured that the workqueues are stopped. */
   3517 	workqueue_destroy(sc->sc_queue_wq);
   3518 	workqueue_destroy(sc->sc_reset_wq);
   3519 
   3520 	for (i = 0; i < sc->sc_nqueues; i++)
   3521 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3522 
   3523 	wm_free_txrx_queues(sc);
   3524 
   3525 	/* Unmap the registers */
   3526 	if (sc->sc_ss) {
   3527 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3528 		sc->sc_ss = 0;
   3529 	}
   3530 	if (sc->sc_ios) {
   3531 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3532 		sc->sc_ios = 0;
   3533 	}
   3534 	if (sc->sc_flashs) {
   3535 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3536 		sc->sc_flashs = 0;
   3537 	}
   3538 
   3539 	if (sc->sc_core_lock)
   3540 		mutex_obj_free(sc->sc_core_lock);
   3541 	if (sc->sc_ich_phymtx)
   3542 		mutex_obj_free(sc->sc_ich_phymtx);
   3543 	if (sc->sc_ich_nvmmtx)
   3544 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3545 
   3546 	return 0;
   3547 }
   3548 
   3549 static bool
   3550 wm_suspend(device_t self, const pmf_qual_t *qual)
   3551 {
   3552 	struct wm_softc *sc = device_private(self);
   3553 
   3554 	wm_release_manageability(sc);
   3555 	wm_release_hw_control(sc);
   3556 	wm_enable_wakeup(sc);
   3557 
   3558 	return true;
   3559 }
   3560 
   3561 static bool
   3562 wm_resume(device_t self, const pmf_qual_t *qual)
   3563 {
   3564 	struct wm_softc *sc = device_private(self);
   3565 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3566 	pcireg_t reg;
   3567 	char buf[256];
   3568 
   3569 	reg = CSR_READ(sc, WMREG_WUS);
   3570 	if (reg != 0) {
   3571 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3572 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3573 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3574 	}
   3575 
   3576 	if (sc->sc_type >= WM_T_PCH2)
   3577 		wm_resume_workarounds_pchlan(sc);
   3578 	IFNET_LOCK(ifp);
   3579 	if ((ifp->if_flags & IFF_UP) == 0) {
   3580 		/* >= PCH_SPT hardware workaround before reset. */
   3581 		if (sc->sc_type >= WM_T_PCH_SPT)
   3582 			wm_flush_desc_rings(sc);
   3583 
   3584 		wm_reset(sc);
   3585 		/* Non-AMT based hardware can now take control from firmware */
   3586 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3587 			wm_get_hw_control(sc);
   3588 		wm_init_manageability(sc);
   3589 	} else {
   3590 		/*
   3591 		 * We called pmf_class_network_register(), so if_init() is
   3592 		 * automatically called when IFF_UP. wm_reset(),
   3593 		 * wm_get_hw_control() and wm_init_manageability() are called
   3594 		 * via wm_init().
   3595 		 */
   3596 	}
   3597 	IFNET_UNLOCK(ifp);
   3598 
   3599 	return true;
   3600 }
   3601 
    3602 /*
    3603  * wm_watchdog:
    3604  *
    3605  *	Watchdog checker. Returns false if a reset was scheduled.
    3606  */
   3607 static bool
   3608 wm_watchdog(struct ifnet *ifp)
   3609 {
   3610 	int qid;
   3611 	struct wm_softc *sc = ifp->if_softc;
    3612 	uint16_t hang_queue = 0; /* 16 bits suffice; the max queue count is 16 (82576). */
   3613 
   3614 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3615 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3616 
   3617 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3618 	}
   3619 
   3620 #ifdef WM_DEBUG
   3621 	if (sc->sc_trigger_reset) {
   3622 		/* debug operation, no need for atomicity or reliability */
   3623 		sc->sc_trigger_reset = 0;
   3624 		hang_queue++;
   3625 	}
   3626 #endif
   3627 
   3628 	if (hang_queue == 0)
   3629 		return true;
   3630 
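         	/*
         	 * The atomic swap ensures only one reset work item is pending
         	 * at a time; only the first caller to detect the hang sees the
         	 * old value of zero and enqueues the work.
         	 */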
   3631 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3632 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3633 
   3634 	return false;
   3635 }
   3636 
   3637 /*
   3638  * Perform an interface watchdog reset.
   3639  */
   3640 static void
   3641 wm_handle_reset_work(struct work *work, void *arg)
   3642 {
   3643 	struct wm_softc * const sc = arg;
   3644 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3645 
   3646 	/* Don't want ioctl operations to happen */
   3647 	IFNET_LOCK(ifp);
   3648 
   3649 	/* reset the interface. */
   3650 	wm_init(ifp);
   3651 
   3652 	IFNET_UNLOCK(ifp);
   3653 
    3654 	/*
    3655 	 * Some upper-layer processing may still call ifp->if_start()
    3656 	 * directly, e.g. ALTQ or a single-CPU system.
    3657 	 */
   3658 	/* Try to get more packets going. */
   3659 	ifp->if_start(ifp);
   3660 
   3661 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3662 }
   3663 
   3664 
   3665 static void
   3666 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3667 {
   3668 
   3669 	mutex_enter(txq->txq_lock);
   3670 	if (txq->txq_sending &&
   3671 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3672 		wm_watchdog_txq_locked(ifp, txq, hang);
   3673 
   3674 	mutex_exit(txq->txq_lock);
   3675 }
   3676 
   3677 static void
   3678 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3679     uint16_t *hang)
   3680 {
   3681 	struct wm_softc *sc = ifp->if_softc;
   3682 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3683 
   3684 	KASSERT(mutex_owned(txq->txq_lock));
   3685 
   3686 	/*
   3687 	 * Since we're using delayed interrupts, sweep up
   3688 	 * before we report an error.
   3689 	 */
   3690 	wm_txeof(txq, UINT_MAX);
   3691 
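         	/* If the queue is still marked sending, record it in the hang bitmap. */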
   3692 	if (txq->txq_sending)
   3693 		*hang |= __BIT(wmq->wmq_id);
   3694 
   3695 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3696 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3697 		    device_xname(sc->sc_dev));
   3698 	} else {
   3699 #ifdef WM_DEBUG
   3700 		int i, j;
   3701 		struct wm_txsoft *txs;
   3702 #endif
   3703 		log(LOG_ERR,
   3704 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3705 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3706 		    txq->txq_next);
   3707 		if_statinc(ifp, if_oerrors);
   3708 #ifdef WM_DEBUG
   3709 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3710 		     i = WM_NEXTTXS(txq, i)) {
   3711 			txs = &txq->txq_soft[i];
   3712 			printf("txs %d tx %d -> %d\n",
   3713 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3714 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3715 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3716 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3717 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3718 					printf("\t %#08x%08x\n",
   3719 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3720 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3721 				} else {
   3722 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3723 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3724 					    txq->txq_descs[j].wtx_addr.wa_low);
   3725 					printf("\t %#04x%02x%02x%08x\n",
   3726 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3727 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3728 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3729 					    txq->txq_descs[j].wtx_cmdlen);
   3730 				}
   3731 				if (j == txs->txs_lastdesc)
   3732 					break;
   3733 			}
   3734 		}
   3735 #endif
   3736 	}
   3737 }
   3738 
   3739 /*
   3740  * wm_tick:
   3741  *
   3742  *	One second timer, used to check link status, sweep up
   3743  *	completed transmit jobs, etc.
   3744  */
   3745 static void
   3746 wm_tick(void *arg)
   3747 {
   3748 	struct wm_softc *sc = arg;
   3749 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3750 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3751 	    cexterr;
   3752 
   3753 	mutex_enter(sc->sc_core_lock);
   3754 
   3755 	if (sc->sc_core_stopping) {
   3756 		mutex_exit(sc->sc_core_lock);
   3757 		return;
   3758 	}
   3759 
   3760 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3761 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3762 	mpc = CSR_READ(sc, WMREG_MPC);
   3763 	colc = CSR_READ(sc, WMREG_COLC);
   3764 	sec = CSR_READ(sc, WMREG_SEC);
   3765 	rlec = CSR_READ(sc, WMREG_RLEC);
   3766 
   3767 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3768 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3769 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3770 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3771 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3772 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3773 
   3774 	if (sc->sc_type >= WM_T_82543) {
   3775 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3776 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3777 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3778 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3779 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3780 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3781 
   3782 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3783 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3784 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3785 	} else
   3786 		algnerrc = rxerrc = cexterr = 0;
   3787 
   3788 	if (sc->sc_type >= WM_T_82542_2_1) {
   3789 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3790 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3791 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3792 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3793 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3794 	}
   3795 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3798 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3799 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3800 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3801 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3802 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3803 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3805 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3806 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3807 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3808 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3810 
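         	/* GORC and GOTC are 64-bit counters split across two registers. */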
   3811 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3812 	    CSR_READ(sc, WMREG_GORCL) +
   3813 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3815 	    CSR_READ(sc, WMREG_GOTCL) +
   3816 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3817 
   3818 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3819 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3820 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3821 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3822 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3823 
   3824 	if (sc->sc_type >= WM_T_82540) {
   3825 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3826 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3827 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3828 	}
   3829 
   3830 	/*
    3831 	 * The TOR(L) count includes:
    3832 	 *  - Errored frames
    3833 	 *  - Flow control frames
    3834 	 *  - Rejected broadcast frames (this note appears in the 82574 and
    3835 	 *    newer datasheets; what "broadcast rejected" means is unclear)
   3836 	 */
   3837 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3838 	    CSR_READ(sc, WMREG_TORL) +
   3839 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   3840 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3841 	    CSR_READ(sc, WMREG_TOTL) +
   3842 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3843 
   3844 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3845 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3846 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3847 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3848 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3849 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3850 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3851 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3852 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3853 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3854 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3855 	if (sc->sc_type < WM_T_82575) {
   3856 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3857 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3858 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3859 		WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3860 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3861 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   3862 		    CSR_READ(sc, WMREG_ICTXQMTC));
   3863 		WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc,
   3864 		    CSR_READ(sc, WMREG_ICRXDMTC));
   3865 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3866 	}
   3867 
   3868 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3869 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3870 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3871 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3872 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3873 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3874 	}
   3875 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3876 	if_statadd_ref(nsr, if_collisions, colc);
   3877 	if_statadd_ref(nsr, if_ierrors,
   3878 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3879 	/*
    3880 	 * WMREG_RNBC is incremented when there are no available buffers in
    3881 	 * host memory. It is not a count of dropped packets, because the
    3882 	 * controller can still receive packets in that case as long as
    3883 	 * there is space in the receive FIFO.
    3884 	 *
    3885 	 * To count WMREG_RNBC events themselves, use a dedicated EVCNT
    3886 	 * instead of if_iqdrops.
   3887 	 */
   3888 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3889 	IF_STAT_PUTREF(ifp);
   3890 
   3891 	if (sc->sc_flags & WM_F_HAS_MII)
   3892 		mii_tick(&sc->sc_mii);
   3893 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3894 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3895 		wm_serdes_tick(sc);
   3896 	else
   3897 		wm_tbi_tick(sc);
   3898 
   3899 	mutex_exit(sc->sc_core_lock);
   3900 
   3901 	if (wm_watchdog(ifp))
   3902 		callout_schedule(&sc->sc_tick_ch, hz);
   3903 }
   3904 
   3905 static int
   3906 wm_ifflags_cb(struct ethercom *ec)
   3907 {
   3908 	struct ifnet *ifp = &ec->ec_if;
   3909 	struct wm_softc *sc = ifp->if_softc;
   3910 	u_short iffchange;
   3911 	int ecchange;
   3912 	bool needreset = false;
   3913 	int rc = 0;
   3914 
   3915 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3916 		device_xname(sc->sc_dev), __func__));
   3917 
   3918 	KASSERT(IFNET_LOCKED(ifp));
   3919 
   3920 	mutex_enter(sc->sc_core_lock);
   3921 
   3922 	/*
    3923 	 * Check for if_flags changes.
    3924 	 * The main purpose is to prevent link-down when opening bpf.
   3925 	 */
   3926 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3927 	sc->sc_if_flags = ifp->if_flags;
   3928 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3929 		needreset = true;
   3930 		goto ec;
   3931 	}
   3932 
   3933 	/* iff related updates */
   3934 	if ((iffchange & IFF_PROMISC) != 0)
   3935 		wm_set_filter(sc);
   3936 
   3937 	wm_set_vlan(sc);
   3938 
   3939 ec:
   3940 	/* Check for ec_capenable. */
   3941 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3942 	sc->sc_ec_capenable = ec->ec_capenable;
   3943 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3944 		needreset = true;
   3945 		goto out;
   3946 	}
   3947 
   3948 	/* ec related updates */
   3949 	wm_set_eee(sc);
   3950 
   3951 out:
   3952 	if (needreset)
   3953 		rc = ENETRESET;
   3954 	mutex_exit(sc->sc_core_lock);
   3955 
   3956 	return rc;
   3957 }
   3958 
   3959 static bool
   3960 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3961 {
   3962 
   3963 	switch (sc->sc_phytype) {
   3964 	case WMPHY_82577: /* ihphy */
   3965 	case WMPHY_82578: /* atphy */
   3966 	case WMPHY_82579: /* ihphy */
   3967 	case WMPHY_I217: /* ihphy */
   3968 	case WMPHY_82580: /* ihphy */
   3969 	case WMPHY_I350: /* ihphy */
   3970 		return true;
   3971 	default:
   3972 		return false;
   3973 	}
   3974 }
   3975 
   3976 static void
   3977 wm_set_linkdown_discard(struct wm_softc *sc)
   3978 {
   3979 
   3980 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3981 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3982 
   3983 		mutex_enter(txq->txq_lock);
   3984 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3985 		mutex_exit(txq->txq_lock);
   3986 	}
   3987 }
   3988 
   3989 static void
   3990 wm_clear_linkdown_discard(struct wm_softc *sc)
   3991 {
   3992 
   3993 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3994 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3995 
   3996 		mutex_enter(txq->txq_lock);
   3997 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3998 		mutex_exit(txq->txq_lock);
   3999 	}
   4000 }
   4001 
   4002 /*
   4003  * wm_ioctl:		[ifnet interface function]
   4004  *
   4005  *	Handle control requests from the operator.
   4006  */
   4007 static int
   4008 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4009 {
   4010 	struct wm_softc *sc = ifp->if_softc;
   4011 	struct ifreq *ifr = (struct ifreq *)data;
   4012 	struct ifaddr *ifa = (struct ifaddr *)data;
   4013 	struct sockaddr_dl *sdl;
   4014 	int error;
   4015 
   4016 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4017 		device_xname(sc->sc_dev), __func__));
   4018 
   4019 	switch (cmd) {
   4020 	case SIOCADDMULTI:
   4021 	case SIOCDELMULTI:
   4022 		break;
   4023 	default:
   4024 		KASSERT(IFNET_LOCKED(ifp));
   4025 	}
   4026 
   4027 	switch (cmd) {
   4028 	case SIOCSIFMEDIA:
   4029 		mutex_enter(sc->sc_core_lock);
   4030 		/* Flow control requires full-duplex mode. */
   4031 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4032 		    (ifr->ifr_media & IFM_FDX) == 0)
   4033 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4034 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4035 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4036 				/* We can do both TXPAUSE and RXPAUSE. */
   4037 				ifr->ifr_media |=
   4038 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4039 			}
   4040 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4041 		}
   4042 		mutex_exit(sc->sc_core_lock);
   4043 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4044 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4045 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4046 				DPRINTF(sc, WM_DEBUG_LINK,
   4047 				    ("%s: %s: Set linkdown discard flag\n",
   4048 					device_xname(sc->sc_dev), __func__));
   4049 				wm_set_linkdown_discard(sc);
   4050 			}
   4051 		}
   4052 		break;
   4053 	case SIOCINITIFADDR:
   4054 		mutex_enter(sc->sc_core_lock);
   4055 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4056 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4057 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4058 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    4059 			/* The unicast address occupies the first receive address entry */
   4060 			wm_set_filter(sc);
   4061 			error = 0;
   4062 			mutex_exit(sc->sc_core_lock);
   4063 			break;
   4064 		}
   4065 		mutex_exit(sc->sc_core_lock);
   4066 		/*FALLTHROUGH*/
   4067 	default:
   4068 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4069 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4070 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4071 				DPRINTF(sc, WM_DEBUG_LINK,
   4072 				    ("%s: %s: Set linkdown discard flag\n",
   4073 					device_xname(sc->sc_dev), __func__));
   4074 				wm_set_linkdown_discard(sc);
   4075 			}
   4076 		}
   4077 		const int s = splnet();
   4078 		/* It may call wm_start, so unlock here */
   4079 		error = ether_ioctl(ifp, cmd, data);
   4080 		splx(s);
   4081 		if (error != ENETRESET)
   4082 			break;
   4083 
   4084 		error = 0;
   4085 
   4086 		if (cmd == SIOCSIFCAP)
   4087 			error = if_init(ifp);
   4088 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4089 			mutex_enter(sc->sc_core_lock);
   4090 			if (sc->sc_if_flags & IFF_RUNNING) {
   4091 				/*
   4092 				 * Multicast list has changed; set the
   4093 				 * hardware filter accordingly.
   4094 				 */
   4095 				wm_set_filter(sc);
   4096 			}
   4097 			mutex_exit(sc->sc_core_lock);
   4098 		}
   4099 		break;
   4100 	}
   4101 
   4102 	return error;
   4103 }
   4104 
   4105 /* MAC address related */
   4106 
   4107 /*
    4108  * Get the NVM offset of the MAC address and return it.
    4109  * On error, return 0 (the default offset).
   4110  */
   4111 static uint16_t
   4112 wm_check_alt_mac_addr(struct wm_softc *sc)
   4113 {
   4114 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4115 	uint16_t offset = NVM_OFF_MACADDR;
   4116 
   4117 	/* Try to read alternative MAC address pointer */
   4118 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4119 		return 0;
   4120 
    4121 	/* Check whether the pointer is valid. */
   4122 	if ((offset == 0x0000) || (offset == 0xffff))
   4123 		return 0;
   4124 
   4125 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4126 	/*
    4127 	 * Check whether the alternative MAC address is valid.
    4128 	 * Some cards have a non-0xffff pointer but don't actually use
    4129 	 * an alternative MAC address.
    4130 	 *
    4131 	 * A valid unicast address has the multicast (group) bit clear.
   4132 	 */
   4133 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4134 		if (((myea[0] & 0xff) & 0x01) == 0)
   4135 			return offset; /* Found */
   4136 
   4137 	/* Not found */
   4138 	return 0;
   4139 }
   4140 
   4141 static int
   4142 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4143 {
   4144 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4145 	uint16_t offset = NVM_OFF_MACADDR;
   4146 	int do_invert = 0;
   4147 
   4148 	switch (sc->sc_type) {
   4149 	case WM_T_82580:
   4150 	case WM_T_I350:
   4151 	case WM_T_I354:
   4152 		/* EEPROM Top Level Partitioning */
   4153 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4154 		break;
   4155 	case WM_T_82571:
   4156 	case WM_T_82575:
   4157 	case WM_T_82576:
   4158 	case WM_T_80003:
   4159 	case WM_T_I210:
   4160 	case WM_T_I211:
   4161 		offset = wm_check_alt_mac_addr(sc);
   4162 		if (offset == 0)
   4163 			if ((sc->sc_funcid & 0x01) == 1)
   4164 				do_invert = 1;
   4165 		break;
   4166 	default:
   4167 		if ((sc->sc_funcid & 0x01) == 1)
   4168 			do_invert = 1;
   4169 		break;
   4170 	}
   4171 
   4172 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4173 		goto bad;
   4174 
   4175 	enaddr[0] = myea[0] & 0xff;
   4176 	enaddr[1] = myea[0] >> 8;
   4177 	enaddr[2] = myea[1] & 0xff;
   4178 	enaddr[3] = myea[1] >> 8;
   4179 	enaddr[4] = myea[2] & 0xff;
   4180 	enaddr[5] = myea[2] >> 8;
   4181 
   4182 	/*
   4183 	 * Toggle the LSB of the MAC address on the second port
   4184 	 * of some dual port cards.
   4185 	 */
   4186 	if (do_invert != 0)
   4187 		enaddr[5] ^= 1;
   4188 
   4189 	return 0;
   4190 
   4191 bad:
   4192 	return -1;
   4193 }
   4194 
   4195 /*
   4196  * wm_set_ral:
   4197  *
 *	Set an entry in the receive address list.
   4199  */
   4200 static void
   4201 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4202 {
   4203 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4204 	uint32_t wlock_mac;
   4205 	int rv;
   4206 
   4207 	if (enaddr != NULL) {
   4208 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4209 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4210 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4211 		ral_hi |= RAL_AV;
   4212 	} else {
   4213 		ral_lo = 0;
   4214 		ral_hi = 0;
   4215 	}
   4216 
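	/*
	 * RAL holds the first four bytes of the address (byte 0 in the
	 * least significant byte) and RAH the last two bytes plus the
	 * Address Valid bit.  Passing a NULL enaddr writes zeroes, which
	 * disables the entry since RAL_AV remains clear.
	 */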
   4217 	switch (sc->sc_type) {
   4218 	case WM_T_82542_2_0:
   4219 	case WM_T_82542_2_1:
   4220 	case WM_T_82543:
   4221 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4222 		CSR_WRITE_FLUSH(sc);
   4223 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4224 		CSR_WRITE_FLUSH(sc);
   4225 		break;
   4226 	case WM_T_PCH2:
   4227 	case WM_T_PCH_LPT:
   4228 	case WM_T_PCH_SPT:
   4229 	case WM_T_PCH_CNP:
   4230 		if (idx == 0) {
   4231 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4232 			CSR_WRITE_FLUSH(sc);
   4233 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4234 			CSR_WRITE_FLUSH(sc);
   4235 			return;
   4236 		}
   4237 		if (sc->sc_type != WM_T_PCH2) {
   4238 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4239 			    FWSM_WLOCK_MAC);
   4240 			addrl = WMREG_SHRAL(idx - 1);
   4241 			addrh = WMREG_SHRAH(idx - 1);
   4242 		} else {
   4243 			wlock_mac = 0;
   4244 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4245 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4246 		}
   4247 
   4248 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4249 			rv = wm_get_swflag_ich8lan(sc);
   4250 			if (rv != 0)
   4251 				return;
   4252 			CSR_WRITE(sc, addrl, ral_lo);
   4253 			CSR_WRITE_FLUSH(sc);
   4254 			CSR_WRITE(sc, addrh, ral_hi);
   4255 			CSR_WRITE_FLUSH(sc);
   4256 			wm_put_swflag_ich8lan(sc);
   4257 		}
   4258 
   4259 		break;
   4260 	default:
   4261 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4262 		CSR_WRITE_FLUSH(sc);
   4263 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4264 		CSR_WRITE_FLUSH(sc);
   4265 		break;
   4266 	}
   4267 }
   4268 
   4269 /*
   4270  * wm_mchash:
   4271  *
   4272  *	Compute the hash of the multicast address for the 4096-bit
   4273  *	multicast filter.
   4274  */
   4275 static uint32_t
   4276 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4277 {
   4278 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4279 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4280 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4281 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4282 	uint32_t hash;
   4283 
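	/*
	 * The hash is taken from the top two bytes of the address;
	 * sc_mchash_type selects which bit window is used.  The result
	 * is 12 bits wide (10 bits on ICH/PCH, which have a smaller
	 * multicast table); wm_set_filter() uses the upper bits to pick
	 * an MTA register and the low 5 bits to pick a bit within it.
	 */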
   4284 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4285 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4286 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4287 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4288 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4289 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4290 		return (hash & 0x3ff);
   4291 	}
   4292 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4293 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4294 
   4295 	return (hash & 0xfff);
   4296 }
   4297 
/*
 * wm_rar_count:
 *
 *	Return the number of receive address list (RAL) entries
 *	for this chip.
 */
   4302 static int
   4303 wm_rar_count(struct wm_softc *sc)
   4304 {
   4305 	int size;
   4306 
   4307 	switch (sc->sc_type) {
   4308 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   4310 		break;
   4311 	case WM_T_ICH9:
   4312 	case WM_T_ICH10:
   4313 	case WM_T_PCH:
   4314 		size = WM_RAL_TABSIZE_ICH8;
   4315 		break;
   4316 	case WM_T_PCH2:
   4317 		size = WM_RAL_TABSIZE_PCH2;
   4318 		break;
   4319 	case WM_T_PCH_LPT:
   4320 	case WM_T_PCH_SPT:
   4321 	case WM_T_PCH_CNP:
   4322 		size = WM_RAL_TABSIZE_PCH_LPT;
   4323 		break;
   4324 	case WM_T_82575:
   4325 	case WM_T_I210:
   4326 	case WM_T_I211:
   4327 		size = WM_RAL_TABSIZE_82575;
   4328 		break;
   4329 	case WM_T_82576:
   4330 	case WM_T_82580:
   4331 		size = WM_RAL_TABSIZE_82576;
   4332 		break;
   4333 	case WM_T_I350:
   4334 	case WM_T_I354:
   4335 		size = WM_RAL_TABSIZE_I350;
   4336 		break;
   4337 	default:
   4338 		size = WM_RAL_TABSIZE;
   4339 	}
   4340 
   4341 	return size;
   4342 }
   4343 
   4344 /*
   4345  * wm_set_filter:
   4346  *
   4347  *	Set up the receive filter.
   4348  */
   4349 static void
   4350 wm_set_filter(struct wm_softc *sc)
   4351 {
   4352 	struct ethercom *ec = &sc->sc_ethercom;
   4353 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4354 	struct ether_multi *enm;
   4355 	struct ether_multistep step;
   4356 	bus_addr_t mta_reg;
   4357 	uint32_t hash, reg, bit;
   4358 	int i, size, ralmax, rv;
   4359 
   4360 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4361 		device_xname(sc->sc_dev), __func__));
   4362 	KASSERT(mutex_owned(sc->sc_core_lock));
   4363 
   4364 	if (sc->sc_type >= WM_T_82544)
   4365 		mta_reg = WMREG_CORDOVA_MTA;
   4366 	else
   4367 		mta_reg = WMREG_MTA;
   4368 
   4369 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4370 
   4371 	if (sc->sc_if_flags & IFF_BROADCAST)
   4372 		sc->sc_rctl |= RCTL_BAM;
   4373 	if (sc->sc_if_flags & IFF_PROMISC) {
   4374 		sc->sc_rctl |= RCTL_UPE;
   4375 		ETHER_LOCK(ec);
   4376 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4377 		ETHER_UNLOCK(ec);
   4378 		goto allmulti;
   4379 	}
   4380 
   4381 	/*
   4382 	 * Set the station address in the first RAL slot, and
   4383 	 * clear the remaining slots.
   4384 	 */
   4385 	size = wm_rar_count(sc);
   4386 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4387 
   4388 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4389 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4390 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4391 		switch (i) {
   4392 		case 0:
   4393 			/* We can use all entries */
   4394 			ralmax = size;
   4395 			break;
   4396 		case 1:
   4397 			/* Only RAR[0] */
   4398 			ralmax = 1;
   4399 			break;
   4400 		default:
   4401 			/* Available SHRA + RAR[0] */
   4402 			ralmax = i + 1;
   4403 		}
   4404 	} else
   4405 		ralmax = size;
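	/*
	 * Clear only the entries we are allowed to write; entries at or
	 * above ralmax are locked by WLOCK_MAC.
	 */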
   4406 	for (i = 1; i < size; i++) {
   4407 		if (i < ralmax)
   4408 			wm_set_ral(sc, NULL, i);
   4409 	}
   4410 
   4411 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4412 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4413 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4414 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4415 		size = WM_ICH8_MC_TABSIZE;
   4416 	else
   4417 		size = WM_MC_TABSIZE;
   4418 	/* Clear out the multicast table. */
   4419 	for (i = 0; i < size; i++) {
   4420 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4421 		CSR_WRITE_FLUSH(sc);
   4422 	}
   4423 
   4424 	ETHER_LOCK(ec);
   4425 	ETHER_FIRST_MULTI(step, ec, enm);
   4426 	while (enm != NULL) {
   4427 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4428 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4429 			ETHER_UNLOCK(ec);
   4430 			/*
   4431 			 * We must listen to a range of multicast addresses.
   4432 			 * For now, just accept all multicasts, rather than
   4433 			 * trying to set only those filter bits needed to match
   4434 			 * the range.  (At this time, the only use of address
   4435 			 * ranges is for IP multicast routing, for which the
   4436 			 * range is big enough to require all bits set.)
   4437 			 */
   4438 			goto allmulti;
   4439 		}
   4440 
   4441 		hash = wm_mchash(sc, enm->enm_addrlo);
   4442 
   4443 		reg = (hash >> 5);
   4444 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4445 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4446 		    || (sc->sc_type == WM_T_PCH2)
   4447 		    || (sc->sc_type == WM_T_PCH_LPT)
   4448 		    || (sc->sc_type == WM_T_PCH_SPT)
   4449 		    || (sc->sc_type == WM_T_PCH_CNP))
   4450 			reg &= 0x1f;
   4451 		else
   4452 			reg &= 0x7f;
   4453 		bit = hash & 0x1f;
   4454 
   4455 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4456 		hash |= 1U << bit;
   4457 
   4458 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4459 			/*
			 * 82544 Errata 9: Certain registers cannot be written
   4461 			 * with particular alignments in PCI-X bus operation
   4462 			 * (FCAH, MTA and VFTA).
   4463 			 */
   4464 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4465 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4466 			CSR_WRITE_FLUSH(sc);
   4467 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4468 			CSR_WRITE_FLUSH(sc);
   4469 		} else {
   4470 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4471 			CSR_WRITE_FLUSH(sc);
   4472 		}
   4473 
   4474 		ETHER_NEXT_MULTI(step, enm);
   4475 	}
   4476 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4477 	ETHER_UNLOCK(ec);
   4478 
   4479 	goto setit;
   4480 
   4481 allmulti:
   4482 	sc->sc_rctl |= RCTL_MPE;
   4483 
   4484 setit:
   4485 	if (sc->sc_type >= WM_T_PCH2) {
   4486 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4487 		    && (ifp->if_mtu > ETHERMTU))
   4488 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4489 		else
   4490 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4491 		if (rv != 0)
   4492 			device_printf(sc->sc_dev,
   4493 			    "Failed to do workaround for jumbo frame.\n");
   4494 	}
   4495 
   4496 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4497 }
   4498 
   4499 /* Reset and init related */
   4500 
   4501 static void
   4502 wm_set_vlan(struct wm_softc *sc)
   4503 {
   4504 
   4505 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4506 		device_xname(sc->sc_dev), __func__));
   4507 
   4508 	/* Deal with VLAN enables. */
   4509 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4510 		sc->sc_ctrl |= CTRL_VME;
   4511 	else
   4512 		sc->sc_ctrl &= ~CTRL_VME;
   4513 
   4514 	/* Write the control registers. */
   4515 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4516 }
   4517 
   4518 static void
   4519 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4520 {
   4521 	uint32_t gcr;
   4522 	pcireg_t ctrl2;
   4523 
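	/*
	 * Devices that don't advertise PCIe capability version 2
	 * (GCR_CAP_VER2) get a 10ms timeout via GCR; version-2 devices
	 * are programmed with a 16ms timeout through the standard PCIe
	 * Device Control 2 register.  Completion timeout resend is
	 * disabled in both cases.
	 */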
   4524 	gcr = CSR_READ(sc, WMREG_GCR);
   4525 
	/* Only take action if the timeout value is at its default of 0 */
   4527 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4528 		goto out;
   4529 
   4530 	if ((gcr & GCR_CAP_VER2) == 0) {
   4531 		gcr |= GCR_CMPL_TMOUT_10MS;
   4532 		goto out;
   4533 	}
   4534 
   4535 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4536 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4537 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4538 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4539 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4540 
   4541 out:
   4542 	/* Disable completion timeout resend */
   4543 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4544 
   4545 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4546 }
   4547 
   4548 void
   4549 wm_get_auto_rd_done(struct wm_softc *sc)
   4550 {
   4551 	int i;
   4552 
	/* Wait for eeprom to reload */
   4554 	switch (sc->sc_type) {
   4555 	case WM_T_82571:
   4556 	case WM_T_82572:
   4557 	case WM_T_82573:
   4558 	case WM_T_82574:
   4559 	case WM_T_82583:
   4560 	case WM_T_82575:
   4561 	case WM_T_82576:
   4562 	case WM_T_82580:
   4563 	case WM_T_I350:
   4564 	case WM_T_I354:
   4565 	case WM_T_I210:
   4566 	case WM_T_I211:
   4567 	case WM_T_80003:
   4568 	case WM_T_ICH8:
   4569 	case WM_T_ICH9:
   4570 		for (i = 0; i < 10; i++) {
   4571 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4572 				break;
   4573 			delay(1000);
   4574 		}
   4575 		if (i == 10) {
   4576 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4577 			    "complete\n", device_xname(sc->sc_dev));
   4578 		}
   4579 		break;
   4580 	default:
   4581 		break;
   4582 	}
   4583 }
   4584 
   4585 void
   4586 wm_lan_init_done(struct wm_softc *sc)
   4587 {
   4588 	uint32_t reg = 0;
   4589 	int i;
   4590 
   4591 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4592 		device_xname(sc->sc_dev), __func__));
   4593 
   4594 	/* Wait for eeprom to reload */
   4595 	switch (sc->sc_type) {
   4596 	case WM_T_ICH10:
   4597 	case WM_T_PCH:
   4598 	case WM_T_PCH2:
   4599 	case WM_T_PCH_LPT:
   4600 	case WM_T_PCH_SPT:
   4601 	case WM_T_PCH_CNP:
   4602 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4603 			reg = CSR_READ(sc, WMREG_STATUS);
   4604 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4605 				break;
   4606 			delay(100);
   4607 		}
   4608 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4609 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4610 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4611 		}
   4612 		break;
   4613 	default:
   4614 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4615 		    __func__);
   4616 		break;
   4617 	}
   4618 
   4619 	reg &= ~STATUS_LAN_INIT_DONE;
   4620 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4621 }
   4622 
   4623 void
   4624 wm_get_cfg_done(struct wm_softc *sc)
   4625 {
   4626 	int mask;
   4627 	uint32_t reg;
   4628 	int i;
   4629 
   4630 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4631 		device_xname(sc->sc_dev), __func__));
   4632 
   4633 	/* Wait for eeprom to reload */
   4634 	switch (sc->sc_type) {
   4635 	case WM_T_82542_2_0:
   4636 	case WM_T_82542_2_1:
   4637 		/* null */
   4638 		break;
   4639 	case WM_T_82543:
   4640 	case WM_T_82544:
   4641 	case WM_T_82540:
   4642 	case WM_T_82545:
   4643 	case WM_T_82545_3:
   4644 	case WM_T_82546:
   4645 	case WM_T_82546_3:
   4646 	case WM_T_82541:
   4647 	case WM_T_82541_2:
   4648 	case WM_T_82547:
   4649 	case WM_T_82547_2:
   4650 	case WM_T_82573:
   4651 	case WM_T_82574:
   4652 	case WM_T_82583:
   4653 		/* generic */
   4654 		delay(10*1000);
   4655 		break;
   4656 	case WM_T_80003:
   4657 	case WM_T_82571:
   4658 	case WM_T_82572:
   4659 	case WM_T_82575:
   4660 	case WM_T_82576:
   4661 	case WM_T_82580:
   4662 	case WM_T_I350:
   4663 	case WM_T_I354:
   4664 	case WM_T_I210:
   4665 	case WM_T_I211:
   4666 		if (sc->sc_type == WM_T_82571) {
   4667 			/* Only 82571 shares port 0 */
   4668 			mask = EEMNGCTL_CFGDONE_0;
   4669 		} else
   4670 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4671 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4672 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4673 				break;
   4674 			delay(1000);
   4675 		}
   4676 		if (i >= WM_PHY_CFG_TIMEOUT)
   4677 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4678 				device_xname(sc->sc_dev), __func__));
   4679 		break;
   4680 	case WM_T_ICH8:
   4681 	case WM_T_ICH9:
   4682 	case WM_T_ICH10:
   4683 	case WM_T_PCH:
   4684 	case WM_T_PCH2:
   4685 	case WM_T_PCH_LPT:
   4686 	case WM_T_PCH_SPT:
   4687 	case WM_T_PCH_CNP:
   4688 		delay(10*1000);
   4689 		if (sc->sc_type >= WM_T_ICH10)
   4690 			wm_lan_init_done(sc);
   4691 		else
   4692 			wm_get_auto_rd_done(sc);
   4693 
   4694 		/* Clear PHY Reset Asserted bit */
   4695 		reg = CSR_READ(sc, WMREG_STATUS);
   4696 		if ((reg & STATUS_PHYRA) != 0)
   4697 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4698 		break;
   4699 	default:
   4700 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4701 		    __func__);
   4702 		break;
   4703 	}
   4704 }
   4705 
   4706 int
   4707 wm_phy_post_reset(struct wm_softc *sc)
   4708 {
   4709 	device_t dev = sc->sc_dev;
   4710 	uint16_t reg;
   4711 	int rv = 0;
   4712 
   4713 	/* This function is only for ICH8 and newer. */
   4714 	if (sc->sc_type < WM_T_ICH8)
   4715 		return 0;
   4716 
   4717 	if (wm_phy_resetisblocked(sc)) {
   4718 		/* XXX */
   4719 		device_printf(dev, "PHY is blocked\n");
   4720 		return -1;
   4721 	}
   4722 
   4723 	/* Allow time for h/w to get to quiescent state after reset */
   4724 	delay(10*1000);
   4725 
   4726 	/* Perform any necessary post-reset workarounds */
   4727 	if (sc->sc_type == WM_T_PCH)
   4728 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4729 	else if (sc->sc_type == WM_T_PCH2)
   4730 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4731 	if (rv != 0)
   4732 		return rv;
   4733 
   4734 	/* Clear the host wakeup bit after lcd reset */
   4735 	if (sc->sc_type >= WM_T_PCH) {
   4736 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4737 		reg &= ~BM_WUC_HOST_WU_BIT;
   4738 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4739 	}
   4740 
   4741 	/* Configure the LCD with the extended configuration region in NVM */
   4742 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4743 		return rv;
   4744 
   4745 	/* Configure the LCD with the OEM bits in NVM */
   4746 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4747 
   4748 	if (sc->sc_type == WM_T_PCH2) {
   4749 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4750 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4751 			delay(10 * 1000);
   4752 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4753 		}
   4754 		/* Set EEE LPI Update Timer to 200usec */
   4755 		rv = sc->phy.acquire(sc);
   4756 		if (rv)
   4757 			return rv;
   4758 		rv = wm_write_emi_reg_locked(dev,
   4759 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4760 		sc->phy.release(sc);
   4761 	}
   4762 
   4763 	return rv;
   4764 }
   4765 
   4766 /* Only for PCH and newer */
   4767 static int
   4768 wm_write_smbus_addr(struct wm_softc *sc)
   4769 {
   4770 	uint32_t strap, freq;
   4771 	uint16_t phy_data;
   4772 	int rv;
   4773 
   4774 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4775 		device_xname(sc->sc_dev), __func__));
   4776 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4777 
   4778 	strap = CSR_READ(sc, WMREG_STRAP);
   4779 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4780 
   4781 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4782 	if (rv != 0)
   4783 		return rv;
   4784 
   4785 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4786 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4787 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4788 
   4789 	if (sc->sc_phytype == WMPHY_I217) {
   4790 		/* Restore SMBus frequency */
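		/* A strap-encoded frequency value of 0 means the SMBus
		   frequency is unknown/unsupported. */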
		if (freq--) {
   4792 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4793 			    | HV_SMB_ADDR_FREQ_HIGH);
   4794 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4795 			    HV_SMB_ADDR_FREQ_LOW);
   4796 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4797 			    HV_SMB_ADDR_FREQ_HIGH);
   4798 		} else
   4799 			DPRINTF(sc, WM_DEBUG_INIT,
   4800 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4801 				device_xname(sc->sc_dev), __func__));
   4802 	}
   4803 
   4804 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4805 	    phy_data);
   4806 }
   4807 
   4808 static int
   4809 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4810 {
   4811 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4812 	uint16_t phy_page = 0;
   4813 	int rv = 0;
   4814 
   4815 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4816 		device_xname(sc->sc_dev), __func__));
   4817 
   4818 	switch (sc->sc_type) {
   4819 	case WM_T_ICH8:
   4820 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4821 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4822 			return 0;
   4823 
   4824 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4825 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4826 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4827 			break;
   4828 		}
   4829 		/* FALLTHROUGH */
   4830 	case WM_T_PCH:
   4831 	case WM_T_PCH2:
   4832 	case WM_T_PCH_LPT:
   4833 	case WM_T_PCH_SPT:
   4834 	case WM_T_PCH_CNP:
   4835 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4836 		break;
   4837 	default:
   4838 		return 0;
   4839 	}
   4840 
   4841 	if ((rv = sc->phy.acquire(sc)) != 0)
   4842 		return rv;
   4843 
   4844 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4845 	if ((reg & sw_cfg_mask) == 0)
   4846 		goto release;
   4847 
   4848 	/*
   4849 	 * Make sure HW does not configure LCD from PHY extended configuration
   4850 	 * before SW configuration
   4851 	 */
   4852 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4853 	if ((sc->sc_type < WM_T_PCH2)
   4854 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4855 		goto release;
   4856 
   4857 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4858 		device_xname(sc->sc_dev), __func__));
   4859 	/* word_addr is in DWORD */
   4860 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4861 
   4862 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4863 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4864 	if (cnf_size == 0)
   4865 		goto release;
   4866 
   4867 	if (((sc->sc_type == WM_T_PCH)
   4868 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4869 	    || (sc->sc_type > WM_T_PCH)) {
   4870 		/*
   4871 		 * HW configures the SMBus address and LEDs when the OEM and
   4872 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4873 		 * are cleared, SW will configure them instead.
   4874 		 */
   4875 		DPRINTF(sc, WM_DEBUG_INIT,
   4876 		    ("%s: %s: Configure SMBus and LED\n",
   4877 			device_xname(sc->sc_dev), __func__));
   4878 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4879 			goto release;
   4880 
   4881 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4882 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4883 		    (uint16_t)reg);
   4884 		if (rv != 0)
   4885 			goto release;
   4886 	}
   4887 
   4888 	/* Configure LCD from extended configuration region. */
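	/*
	 * Each entry is a pair of 16-bit NVM words: register data
	 * followed by the PHY register address.
	 */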
   4889 	for (i = 0; i < cnf_size; i++) {
   4890 		uint16_t reg_data, reg_addr;
   4891 
   4892 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4893 			goto release;
   4894 
		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4896 			goto release;
   4897 
   4898 		if (reg_addr == IGPHY_PAGE_SELECT)
   4899 			phy_page = reg_data;
   4900 
   4901 		reg_addr &= IGPHY_MAXREGADDR;
   4902 		reg_addr |= phy_page;
   4903 
   4904 		KASSERT(sc->phy.writereg_locked != NULL);
   4905 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4906 		    reg_data);
   4907 	}
   4908 
   4909 release:
   4910 	sc->phy.release(sc);
   4911 	return rv;
   4912 }
   4913 
   4914 /*
   4915  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4916  *  @sc:       pointer to the HW structure
 *  @d0_state: true when entering D0, false when entering D3
   4918  *
   4919  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4920  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4922  */
   4923 int
   4924 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4925 {
   4926 	uint32_t mac_reg;
   4927 	uint16_t oem_reg;
   4928 	int rv;
   4929 
   4930 	if (sc->sc_type < WM_T_PCH)
   4931 		return 0;
   4932 
   4933 	rv = sc->phy.acquire(sc);
   4934 	if (rv != 0)
   4935 		return rv;
   4936 
   4937 	if (sc->sc_type == WM_T_PCH) {
   4938 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4939 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4940 			goto release;
   4941 	}
   4942 
   4943 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4944 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4945 		goto release;
   4946 
   4947 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4948 
   4949 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4950 	if (rv != 0)
   4951 		goto release;
   4952 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4953 
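	/*
	 * Mirror the MAC's PHY_CTRL GbE-disable and LPLU settings for
	 * the target power state into the PHY's OEM bits.
	 */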
   4954 	if (d0_state) {
   4955 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4956 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4957 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4958 			oem_reg |= HV_OEM_BITS_LPLU;
   4959 	} else {
   4960 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4961 		    != 0)
   4962 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4963 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4964 		    != 0)
   4965 			oem_reg |= HV_OEM_BITS_LPLU;
   4966 	}
   4967 
   4968 	/* Set Restart auto-neg to activate the bits */
   4969 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4970 	    && (wm_phy_resetisblocked(sc) == false))
   4971 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4972 
   4973 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4974 
   4975 release:
   4976 	sc->phy.release(sc);
   4977 
   4978 	return rv;
   4979 }
   4980 
   4981 /* Init hardware bits */
   4982 void
   4983 wm_initialize_hardware_bits(struct wm_softc *sc)
   4984 {
   4985 	uint32_t tarc0, tarc1, reg;
   4986 
   4987 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4988 		device_xname(sc->sc_dev), __func__));
   4989 
   4990 	/* For 82571 variant, 80003 and ICHs */
   4991 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4992 	    || (sc->sc_type >= WM_T_80003)) {
   4993 
   4994 		/* Transmit Descriptor Control 0 */
   4995 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4996 		reg |= TXDCTL_COUNT_DESC;
   4997 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4998 
   4999 		/* Transmit Descriptor Control 1 */
   5000 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5001 		reg |= TXDCTL_COUNT_DESC;
   5002 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5003 
   5004 		/* TARC0 */
   5005 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5006 		switch (sc->sc_type) {
   5007 		case WM_T_82571:
   5008 		case WM_T_82572:
   5009 		case WM_T_82573:
   5010 		case WM_T_82574:
   5011 		case WM_T_82583:
   5012 		case WM_T_80003:
   5013 			/* Clear bits 30..27 */
   5014 			tarc0 &= ~__BITS(30, 27);
   5015 			break;
   5016 		default:
   5017 			break;
   5018 		}
   5019 
   5020 		switch (sc->sc_type) {
   5021 		case WM_T_82571:
   5022 		case WM_T_82572:
   5023 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5024 
   5025 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5026 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5027 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5028 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5030 
   5031 			/* TARC1 bit 28 */
   5032 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5033 				tarc1 &= ~__BIT(28);
   5034 			else
   5035 				tarc1 |= __BIT(28);
   5036 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5037 
   5038 			/*
   5039 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   5041 			 */
   5042 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5043 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5044 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5045 			break;
   5046 		case WM_T_82573:
   5047 		case WM_T_82574:
   5048 		case WM_T_82583:
   5049 			if ((sc->sc_type == WM_T_82574)
   5050 			    || (sc->sc_type == WM_T_82583))
   5051 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5052 
   5053 			/* Extended Device Control */
   5054 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5055 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5056 			reg |= __BIT(22);	/* Set bit 22 */
   5057 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5058 
   5059 			/* Device Control */
   5060 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5061 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5062 
   5063 			/* PCIe Control Register */
   5064 			/*
   5065 			 * 82573 Errata (unknown).
   5066 			 *
   5067 			 * 82574 Errata 25 and 82583 Errata 12
   5068 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer is not affected.
   5070 			 */
   5071 			reg = CSR_READ(sc, WMREG_GCR);
   5072 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5073 			CSR_WRITE(sc, WMREG_GCR, reg);
   5074 
   5075 			if ((sc->sc_type == WM_T_82574)
   5076 			    || (sc->sc_type == WM_T_82583)) {
   5077 				/*
   5078 				 * Document says this bit must be set for
   5079 				 * proper operation.
   5080 				 */
   5081 				reg = CSR_READ(sc, WMREG_GCR);
   5082 				reg |= __BIT(22);
   5083 				CSR_WRITE(sc, WMREG_GCR, reg);
   5084 
   5085 				/*
				 * Apply a workaround for the hardware erratum
				 * documented in the errata docs. It fixes an
				 * issue where error-prone or unreliable PCIe
				 * completions occur, particularly with ASPM
				 * enabled. Without the fix, the issue can
				 * cause Tx timeouts.
   5092 				 */
   5093 				reg = CSR_READ(sc, WMREG_GCR2);
   5094 				reg |= __BIT(0);
   5095 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5096 			}
   5097 			break;
   5098 		case WM_T_80003:
   5099 			/* TARC0 */
   5100 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5101 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5103 
   5104 			/* TARC1 bit 28 */
   5105 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5106 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5107 				tarc1 &= ~__BIT(28);
   5108 			else
   5109 				tarc1 |= __BIT(28);
   5110 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5111 			break;
   5112 		case WM_T_ICH8:
   5113 		case WM_T_ICH9:
   5114 		case WM_T_ICH10:
   5115 		case WM_T_PCH:
   5116 		case WM_T_PCH2:
   5117 		case WM_T_PCH_LPT:
   5118 		case WM_T_PCH_SPT:
   5119 		case WM_T_PCH_CNP:
   5120 			/* TARC0 */
   5121 			if (sc->sc_type == WM_T_ICH8) {
   5122 				/* Set TARC0 bits 29 and 28 */
   5123 				tarc0 |= __BITS(29, 28);
   5124 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5125 				tarc0 |= __BIT(29);
   5126 				/*
				 * Drop bit 28 (from Linux). See the
				 * I218/I219 spec update,
   5129 				 * "5. Buffer Overrun While the I219 is
   5130 				 * Processing DMA Transactions"
   5131 				 */
   5132 				tarc0 &= ~__BIT(28);
   5133 			}
   5134 			/* Set TARC0 bits 23,24,26,27 */
   5135 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5136 
   5137 			/* CTRL_EXT */
   5138 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5139 			reg |= __BIT(22);	/* Set bit 22 */
   5140 			/*
   5141 			 * Enable PHY low-power state when MAC is at D3
   5142 			 * w/o WoL
   5143 			 */
   5144 			if (sc->sc_type >= WM_T_PCH)
   5145 				reg |= CTRL_EXT_PHYPDEN;
   5146 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5147 
   5148 			/* TARC1 */
   5149 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5150 			/* bit 28 */
   5151 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5152 				tarc1 &= ~__BIT(28);
   5153 			else
   5154 				tarc1 |= __BIT(28);
   5155 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5156 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5157 
   5158 			/* Device Status */
   5159 			if (sc->sc_type == WM_T_ICH8) {
   5160 				reg = CSR_READ(sc, WMREG_STATUS);
   5161 				reg &= ~__BIT(31);
   5162 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5163 
   5164 			}
   5165 
   5166 			/* IOSFPC */
   5167 			if (sc->sc_type == WM_T_PCH_SPT) {
   5168 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5170 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5171 			}
   5172 			/*
			 * To work around a descriptor data corruption issue
			 * seen with NFS v2 UDP traffic, just disable the NFS
			 * filtering capability.
   5176 			 */
   5177 			reg = CSR_READ(sc, WMREG_RFCTL);
   5178 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5179 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5180 			break;
   5181 		default:
   5182 			break;
   5183 		}
   5184 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5185 
   5186 		switch (sc->sc_type) {
   5187 		case WM_T_82571:
   5188 		case WM_T_82572:
   5189 		case WM_T_82573:
   5190 		case WM_T_80003:
   5191 		case WM_T_ICH8:
   5192 			/*
			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
			 * others: set these bits to avoid the RSS Hash Value
			 * bug.
   5195 			 */
   5196 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS
			    | WMREG_RFCTL_IPV6EXDIS;
   5198 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5199 			break;
   5200 		case WM_T_82574:
			/* Use the extended Rx descriptor format. */
   5202 			reg = CSR_READ(sc, WMREG_RFCTL);
   5203 			reg |= WMREG_RFCTL_EXSTEN;
   5204 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5205 			break;
   5206 		default:
   5207 			break;
   5208 		}
   5209 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5210 		/*
   5211 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5212 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5213 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5214 		 * Correctly by the Device"
   5215 		 *
   5216 		 * I354(C2000) Errata AVR53:
   5217 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5218 		 * Hang"
   5219 		 */
   5220 		reg = CSR_READ(sc, WMREG_RFCTL);
   5221 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5222 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5223 	}
   5224 }
   5225 
   5226 static uint32_t
   5227 wm_rxpbs_adjust_82580(uint32_t val)
   5228 {
   5229 	uint32_t rv = 0;
   5230 
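	/*
	 * On the 82580, RXPBS holds an index into a fixed size table
	 * rather than the size itself; out-of-range values map to 0.
	 */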
   5231 	if (val < __arraycount(wm_82580_rxpbs_table))
   5232 		rv = wm_82580_rxpbs_table[val];
   5233 
   5234 	return rv;
   5235 }
   5236 
   5237 /*
   5238  * wm_reset_phy:
   5239  *
   5240  *	generic PHY reset function.
   5241  *	Same as e1000_phy_hw_reset_generic()
   5242  */
   5243 static int
   5244 wm_reset_phy(struct wm_softc *sc)
   5245 {
   5246 	uint32_t reg;
   5247 	int rv;
   5248 
   5249 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5250 		device_xname(sc->sc_dev), __func__));
   5251 	if (wm_phy_resetisblocked(sc))
   5252 		return -1;
   5253 
   5254 	rv = sc->phy.acquire(sc);
   5255 	if (rv) {
   5256 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5257 		    __func__, rv);
   5258 		return rv;
   5259 	}
   5260 
   5261 	reg = CSR_READ(sc, WMREG_CTRL);
   5262 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5263 	CSR_WRITE_FLUSH(sc);
   5264 
   5265 	delay(sc->phy.reset_delay_us);
   5266 
   5267 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5268 	CSR_WRITE_FLUSH(sc);
   5269 
   5270 	delay(150);
   5271 
   5272 	sc->phy.release(sc);
   5273 
   5274 	wm_get_cfg_done(sc);
   5275 	wm_phy_post_reset(sc);
   5276 
   5277 	return 0;
   5278 }
   5279 
   5280 /*
   5281  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5282  *
   5283  * In i219, the descriptor rings must be emptied before resetting the HW
   5284  * or before changing the device state to D3 during runtime (runtime PM).
   5285  *
   5286  * Failure to do this will cause the HW to enter a unit hang state which can
   5287  * only be released by PCI reset on the device.
   5288  *
   5289  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5290  */
   5291 static void
   5292 wm_flush_desc_rings(struct wm_softc *sc)
   5293 {
   5294 	pcireg_t preg;
   5295 	uint32_t reg;
   5296 	struct wm_txqueue *txq;
   5297 	wiseman_txdesc_t *txd;
   5298 	int nexttx;
   5299 	uint32_t rctl;
   5300 
   5301 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5302 
   5303 	/* First, disable MULR fix in FEXTNVM11 */
   5304 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5305 	reg |= FEXTNVM11_DIS_MULRFIX;
   5306 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5307 
   5308 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5309 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5310 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5311 		return;
   5312 
   5313 	/*
   5314 	 * Remove all descriptors from the tx_ring.
   5315 	 *
   5316 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   5317 	 * happens when the HW reads the regs. We assign the ring itself as
	 * the data of the next descriptor; we don't care about the data
	 * since we are about to reset the HW.
   5320 	 */
   5321 #ifdef WM_DEBUG
   5322 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5323 #endif
   5324 	reg = CSR_READ(sc, WMREG_TCTL);
   5325 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5326 
   5327 	txq = &sc->sc_queue[0].wmq_txq;
   5328 	nexttx = txq->txq_next;
   5329 	txd = &txq->txq_descs[nexttx];
   5330 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5331 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5332 	txd->wtx_fields.wtxu_status = 0;
   5333 	txd->wtx_fields.wtxu_options = 0;
   5334 	txd->wtx_fields.wtxu_vlan = 0;
   5335 
   5336 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5337 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5338 
   5339 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5340 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5341 	CSR_WRITE_FLUSH(sc);
   5342 	delay(250);
   5343 
   5344 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5345 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5346 		return;
   5347 
   5348 	/*
   5349 	 * Mark all descriptors in the RX ring as consumed and disable the
   5350 	 * rx ring.
   5351 	 */
   5352 #ifdef WM_DEBUG
   5353 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5354 #endif
   5355 	rctl = CSR_READ(sc, WMREG_RCTL);
   5356 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5357 	CSR_WRITE_FLUSH(sc);
   5358 	delay(150);
   5359 
   5360 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5361 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5362 	reg &= 0xffffc000;
   5363 	/*
   5364 	 * Update thresholds: prefetch threshold to 31, host threshold
   5365 	 * to 1 and make sure the granularity is "descriptors" and not
   5366 	 * "cache lines"
   5367 	 */
   5368 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5369 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5370 
   5371 	/* Momentarily enable the RX ring for the changes to take effect */
   5372 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5373 	CSR_WRITE_FLUSH(sc);
   5374 	delay(150);
   5375 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5376 }
   5377 
   5378 /*
   5379  * wm_reset:
   5380  *
   5381  *	Reset the i82542 chip.
   5382  */
   5383 static void
   5384 wm_reset(struct wm_softc *sc)
   5385 {
   5386 	int phy_reset = 0;
   5387 	int i, error = 0;
   5388 	uint32_t reg;
   5389 	uint16_t kmreg;
   5390 	int rv;
   5391 
   5392 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5393 		device_xname(sc->sc_dev), __func__));
   5394 	KASSERT(sc->sc_type != 0);
   5395 
   5396 	/*
   5397 	 * Allocate on-chip memory according to the MTU size.
   5398 	 * The Packet Buffer Allocation register must be written
   5399 	 * before the chip is reset.
   5400 	 */
   5401 	switch (sc->sc_type) {
   5402 	case WM_T_82547:
   5403 	case WM_T_82547_2:
   5404 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5405 		    PBA_22K : PBA_30K;
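		/*
		 * On these chips the region above the RX allocation is
		 * used as the TX FIFO; record its geometry here for the
		 * Tx FIFO stall workaround.
		 */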
   5406 		for (i = 0; i < sc->sc_nqueues; i++) {
   5407 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5408 			txq->txq_fifo_head = 0;
   5409 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5410 			txq->txq_fifo_size =
   5411 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5412 			txq->txq_fifo_stall = 0;
   5413 		}
   5414 		break;
   5415 	case WM_T_82571:
   5416 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5418 	case WM_T_80003:
   5419 		sc->sc_pba = PBA_32K;
   5420 		break;
   5421 	case WM_T_82573:
   5422 		sc->sc_pba = PBA_12K;
   5423 		break;
   5424 	case WM_T_82574:
   5425 	case WM_T_82583:
   5426 		sc->sc_pba = PBA_20K;
   5427 		break;
   5428 	case WM_T_82576:
   5429 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5430 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5431 		break;
   5432 	case WM_T_82580:
   5433 	case WM_T_I350:
   5434 	case WM_T_I354:
   5435 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5436 		break;
   5437 	case WM_T_I210:
   5438 	case WM_T_I211:
   5439 		sc->sc_pba = PBA_34K;
   5440 		break;
   5441 	case WM_T_ICH8:
   5442 		/* Workaround for a bit corruption issue in FIFO memory */
   5443 		sc->sc_pba = PBA_8K;
   5444 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5445 		break;
   5446 	case WM_T_ICH9:
   5447 	case WM_T_ICH10:
   5448 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5449 		    PBA_14K : PBA_10K;
   5450 		break;
   5451 	case WM_T_PCH:
   5452 	case WM_T_PCH2:	/* XXX 14K? */
   5453 	case WM_T_PCH_LPT:
   5454 	case WM_T_PCH_SPT:
   5455 	case WM_T_PCH_CNP:
   5456 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5457 		    PBA_12K : PBA_26K;
   5458 		break;
   5459 	default:
   5460 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5461 		    PBA_40K : PBA_48K;
   5462 		break;
   5463 	}
   5464 	/*
   5465 	 * Only old or non-multiqueue devices have the PBA register
   5466 	 * XXX Need special handling for 82575.
   5467 	 */
   5468 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5469 	    || (sc->sc_type == WM_T_82575))
   5470 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5471 
   5472 	/* Prevent the PCI-E bus from sticking */
   5473 	if (sc->sc_flags & WM_F_PCIE) {
   5474 		int timeout = 800;
   5475 
   5476 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5477 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5478 
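		/*
		 * Poll up to 800 * 100us = 80ms for the GIO master
		 * enable status bit to clear.
		 */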
   5479 		while (timeout--) {
   5480 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5481 			    == 0)
   5482 				break;
   5483 			delay(100);
   5484 		}
   5485 		if (timeout == 0)
   5486 			device_printf(sc->sc_dev,
   5487 			    "failed to disable bus mastering\n");
   5488 	}
   5489 
   5490 	/* Set the completion timeout for interface */
   5491 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5492 	    || (sc->sc_type == WM_T_82580)
   5493 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5494 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5495 		wm_set_pcie_completion_timeout(sc);
   5496 
   5497 	/* Clear interrupt */
   5498 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5499 	if (wm_is_using_msix(sc)) {
   5500 		if (sc->sc_type != WM_T_82574) {
   5501 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5502 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5503 		} else
   5504 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5505 	}
   5506 
   5507 	/* Stop the transmit and receive processes. */
   5508 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5509 	sc->sc_rctl &= ~RCTL_EN;
   5510 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5511 	CSR_WRITE_FLUSH(sc);
   5512 
   5513 	/* XXX set_tbi_sbp_82543() */
   5514 
   5515 	delay(10*1000);
   5516 
   5517 	/* Must acquire the MDIO ownership before MAC reset */
   5518 	switch (sc->sc_type) {
   5519 	case WM_T_82573:
   5520 	case WM_T_82574:
   5521 	case WM_T_82583:
   5522 		error = wm_get_hw_semaphore_82573(sc);
   5523 		break;
   5524 	default:
   5525 		break;
   5526 	}
   5527 
   5528 	/*
   5529 	 * 82541 Errata 29? & 82547 Errata 28?
   5530 	 * See also the description about PHY_RST bit in CTRL register
   5531 	 * in 8254x_GBe_SDM.pdf.
   5532 	 */
   5533 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5534 		CSR_WRITE(sc, WMREG_CTRL,
   5535 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5536 		CSR_WRITE_FLUSH(sc);
   5537 		delay(5000);
   5538 	}
   5539 
   5540 	switch (sc->sc_type) {
   5541 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5542 	case WM_T_82541:
   5543 	case WM_T_82541_2:
   5544 	case WM_T_82547:
   5545 	case WM_T_82547_2:
   5546 		/*
   5547 		 * On some chipsets, a reset through a memory-mapped write
   5548 		 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes a major headache that can be avoided
   5550 		 * by issuing the reset via indirect register writes through
   5551 		 * I/O space.
   5552 		 *
   5553 		 * So, if we successfully mapped the I/O BAR at attach time,
   5554 		 * use that. Otherwise, try our luck with a memory-mapped
   5555 		 * reset.
   5556 		 */
   5557 		if (sc->sc_flags & WM_F_IOH_VALID)
   5558 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5559 		else
   5560 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5561 		break;
   5562 	case WM_T_82545_3:
   5563 	case WM_T_82546_3:
   5564 		/* Use the shadow control register on these chips. */
   5565 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5566 		break;
   5567 	case WM_T_80003:
   5568 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5569 		if (sc->phy.acquire(sc) != 0)
   5570 			break;
   5571 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5572 		sc->phy.release(sc);
   5573 		break;
   5574 	case WM_T_ICH8:
   5575 	case WM_T_ICH9:
   5576 	case WM_T_ICH10:
   5577 	case WM_T_PCH:
   5578 	case WM_T_PCH2:
   5579 	case WM_T_PCH_LPT:
   5580 	case WM_T_PCH_SPT:
   5581 	case WM_T_PCH_CNP:
   5582 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5583 		if (wm_phy_resetisblocked(sc) == false) {
   5584 			/*
   5585 			 * Gate automatic PHY configuration by hardware on
   5586 			 * non-managed 82579
   5587 			 */
   5588 			if ((sc->sc_type == WM_T_PCH2)
   5589 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5590 				== 0))
   5591 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5592 
   5593 			reg |= CTRL_PHY_RESET;
   5594 			phy_reset = 1;
   5595 		} else
   5596 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5597 		if (sc->phy.acquire(sc) != 0)
   5598 			break;
   5599 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   5601 		delay(20*1000);
   5602 		/*
   5603 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5604 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5605 		 * only. See also wm_get_swflag_ich8lan().
   5606 		 */
   5607 		mutex_exit(sc->sc_ich_phymtx);
   5608 		break;
   5609 	case WM_T_82580:
   5610 	case WM_T_I350:
   5611 	case WM_T_I354:
   5612 	case WM_T_I210:
   5613 	case WM_T_I211:
   5614 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5615 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5616 			CSR_WRITE_FLUSH(sc);
   5617 		delay(5000);
   5618 		break;
   5619 	case WM_T_82542_2_0:
   5620 	case WM_T_82542_2_1:
   5621 	case WM_T_82543:
   5622 	case WM_T_82540:
   5623 	case WM_T_82545:
   5624 	case WM_T_82546:
   5625 	case WM_T_82571:
   5626 	case WM_T_82572:
   5627 	case WM_T_82573:
   5628 	case WM_T_82574:
   5629 	case WM_T_82575:
   5630 	case WM_T_82576:
   5631 	case WM_T_82583:
   5632 	default:
   5633 		/* Everything else can safely use the documented method. */
   5634 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5635 		break;
   5636 	}
   5637 
   5638 	/* Must release the MDIO ownership after MAC reset */
   5639 	switch (sc->sc_type) {
   5640 	case WM_T_82573:
   5641 	case WM_T_82574:
   5642 	case WM_T_82583:
   5643 		if (error == 0)
   5644 			wm_put_hw_semaphore_82573(sc);
   5645 		break;
   5646 	default:
   5647 		break;
   5648 	}
   5649 
   5650 	/* Set Phy Config Counter to 50msec */
   5651 	if (sc->sc_type == WM_T_PCH2) {
   5652 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5653 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5654 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5655 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5656 	}
   5657 
   5658 	if (phy_reset != 0)
   5659 		wm_get_cfg_done(sc);
   5660 
   5661 	/* Reload EEPROM */
   5662 	switch (sc->sc_type) {
   5663 	case WM_T_82542_2_0:
   5664 	case WM_T_82542_2_1:
   5665 	case WM_T_82543:
   5666 	case WM_T_82544:
   5667 		delay(10);
   5668 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5669 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5670 		CSR_WRITE_FLUSH(sc);
   5671 		delay(2000);
   5672 		break;
   5673 	case WM_T_82540:
   5674 	case WM_T_82545:
   5675 	case WM_T_82545_3:
   5676 	case WM_T_82546:
   5677 	case WM_T_82546_3:
   5678 		delay(5*1000);
   5679 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5680 		break;
   5681 	case WM_T_82541:
   5682 	case WM_T_82541_2:
   5683 	case WM_T_82547:
   5684 	case WM_T_82547_2:
   5685 		delay(20000);
   5686 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5687 		break;
   5688 	case WM_T_82571:
   5689 	case WM_T_82572:
   5690 	case WM_T_82573:
   5691 	case WM_T_82574:
   5692 	case WM_T_82583:
   5693 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5694 			delay(10);
   5695 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5696 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5697 			CSR_WRITE_FLUSH(sc);
   5698 		}
   5699 		/* check EECD_EE_AUTORD */
   5700 		wm_get_auto_rd_done(sc);
   5701 		/*
   5702 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5703 		 * is set.
   5704 		 */
   5705 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5706 		    || (sc->sc_type == WM_T_82583))
   5707 			delay(25*1000);
   5708 		break;
   5709 	case WM_T_82575:
   5710 	case WM_T_82576:
   5711 	case WM_T_82580:
   5712 	case WM_T_I350:
   5713 	case WM_T_I354:
   5714 	case WM_T_I210:
   5715 	case WM_T_I211:
   5716 	case WM_T_80003:
   5717 		/* check EECD_EE_AUTORD */
   5718 		wm_get_auto_rd_done(sc);
   5719 		break;
   5720 	case WM_T_ICH8:
   5721 	case WM_T_ICH9:
   5722 	case WM_T_ICH10:
   5723 	case WM_T_PCH:
   5724 	case WM_T_PCH2:
   5725 	case WM_T_PCH_LPT:
   5726 	case WM_T_PCH_SPT:
   5727 	case WM_T_PCH_CNP:
   5728 		break;
   5729 	default:
   5730 		panic("%s: unknown type\n", __func__);
   5731 	}
   5732 
   5733 	/* Check whether EEPROM is present or not */
   5734 	switch (sc->sc_type) {
   5735 	case WM_T_82575:
   5736 	case WM_T_82576:
   5737 	case WM_T_82580:
   5738 	case WM_T_I350:
   5739 	case WM_T_I354:
   5740 	case WM_T_ICH8:
   5741 	case WM_T_ICH9:
   5742 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5743 			/* Not found */
   5744 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5745 			if (sc->sc_type == WM_T_82575)
   5746 				wm_reset_init_script_82575(sc);
   5747 		}
   5748 		break;
   5749 	default:
   5750 		break;
   5751 	}
   5752 
   5753 	if (phy_reset != 0)
   5754 		wm_phy_post_reset(sc);
   5755 
   5756 	if ((sc->sc_type == WM_T_82580)
   5757 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5758 		/* Clear global device reset status bit */
   5759 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5760 	}
   5761 
   5762 	/* Clear any pending interrupt events. */
   5763 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5764 	reg = CSR_READ(sc, WMREG_ICR);
   5765 	if (wm_is_using_msix(sc)) {
   5766 		if (sc->sc_type != WM_T_82574) {
   5767 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5768 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5769 		} else
   5770 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5771 	}
   5772 
   5773 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5774 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5775 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5776 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5777 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5778 		reg |= KABGTXD_BGSQLBIAS;
   5779 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5780 	}
   5781 
   5782 	/* Reload sc_ctrl */
   5783 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5784 
   5785 	wm_set_eee(sc);
   5786 
   5787 	/*
   5788 	 * For PCH, this write will make sure that any noise will be detected
   5789 	 * as a CRC error and be dropped rather than show up as a bad packet
   5790 	 * to the DMA engine
   5791 	 */
   5792 	if (sc->sc_type == WM_T_PCH)
   5793 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5794 
   5795 	if (sc->sc_type >= WM_T_82544)
   5796 		CSR_WRITE(sc, WMREG_WUC, 0);
   5797 
   5798 	if (sc->sc_type < WM_T_82575)
   5799 		wm_disable_aspm(sc); /* Workaround for some chips */
   5800 
   5801 	wm_reset_mdicnfg_82580(sc);
   5802 
   5803 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5804 		wm_pll_workaround_i210(sc);
   5805 
   5806 	if (sc->sc_type == WM_T_80003) {
   5807 		/* Default to TRUE to enable the MDIC W/A */
   5808 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5809 
   5810 		rv = wm_kmrn_readreg(sc,
   5811 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5812 		if (rv == 0) {
   5813 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5814 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5815 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5816 			else
   5817 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5818 		}
   5819 	}
   5820 }
   5821 
   5822 /*
   5823  * wm_add_rxbuf:
   5824  *
 *	Add a receive buffer to the indicated descriptor.
   5826  */
   5827 static int
   5828 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5829 {
   5830 	struct wm_softc *sc = rxq->rxq_sc;
   5831 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5832 	struct mbuf *m;
   5833 	int error;
   5834 
   5835 	KASSERT(mutex_owned(rxq->rxq_lock));
   5836 
   5837 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5838 	if (m == NULL)
   5839 		return ENOBUFS;
   5840 
   5841 	MCLGET(m, M_DONTWAIT);
   5842 	if ((m->m_flags & M_EXT) == 0) {
   5843 		m_freem(m);
   5844 		return ENOBUFS;
   5845 	}
   5846 
   5847 	if (rxs->rxs_mbuf != NULL)
   5848 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5849 
   5850 	rxs->rxs_mbuf = m;
   5851 
   5852 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5853 	/*
   5854 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5855 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5856 	 */
   5857 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5858 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5859 	if (error) {
   5860 		/* XXX XXX XXX */
   5861 		aprint_error_dev(sc->sc_dev,
   5862 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5863 		panic("wm_add_rxbuf");
   5864 	}
   5865 
   5866 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5867 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5868 
   5869 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5870 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5871 			wm_init_rxdesc(rxq, idx);
   5872 	} else
   5873 		wm_init_rxdesc(rxq, idx);
   5874 
   5875 	return 0;
   5876 }
   5877 
   5878 /*
   5879  * wm_rxdrain:
   5880  *
   5881  *	Drain the receive queue.
   5882  */
   5883 static void
   5884 wm_rxdrain(struct wm_rxqueue *rxq)
   5885 {
   5886 	struct wm_softc *sc = rxq->rxq_sc;
   5887 	struct wm_rxsoft *rxs;
   5888 	int i;
   5889 
   5890 	KASSERT(mutex_owned(rxq->rxq_lock));
   5891 
   5892 	for (i = 0; i < WM_NRXDESC; i++) {
   5893 		rxs = &rxq->rxq_soft[i];
   5894 		if (rxs->rxs_mbuf != NULL) {
   5895 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5896 			m_freem(rxs->rxs_mbuf);
   5897 			rxs->rxs_mbuf = NULL;
   5898 		}
   5899 	}
   5900 }
   5901 
   5902 /*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
   5906  */
   5907 static void
   5908 wm_init_rss(struct wm_softc *sc)
   5909 {
   5910 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5911 	int i;
   5912 
   5913 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5914 
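	/*
	 * Spread the indirection table entries round-robin over the
	 * active queues.
	 */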
   5915 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5916 		unsigned int qid, reta_ent;
   5917 
   5918 		qid  = i % sc->sc_nqueues;
   5919 		switch (sc->sc_type) {
   5920 		case WM_T_82574:
   5921 			reta_ent = __SHIFTIN(qid,
   5922 			    RETA_ENT_QINDEX_MASK_82574);
   5923 			break;
   5924 		case WM_T_82575:
   5925 			reta_ent = __SHIFTIN(qid,
   5926 			    RETA_ENT_QINDEX1_MASK_82575);
   5927 			break;
   5928 		default:
   5929 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5930 			break;
   5931 		}
   5932 
   5933 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5934 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5935 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5936 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5937 	}
   5938 
   5939 	rss_getkey((uint8_t *)rss_key);
   5940 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5941 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5942 
   5943 	if (sc->sc_type == WM_T_82574)
   5944 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5945 	else
   5946 		mrqc = MRQC_ENABLE_RSS_MQ;
   5947 
   5948 	/*
    5949 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5950 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5951 	 */
   5952 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5953 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5954 #if 0
   5955 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5956 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5957 #endif
   5958 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5959 
   5960 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5961 }
   5962 
   5963 /*
    5964  * Adjust the numbers of TX and RX queues which the system actually uses.
    5965  *
    5966  * The numbers are affected by the following parameters:
    5967  *     - The number of hardware queues
   5968  *     - The number of MSI-X vectors (= "nvectors" argument)
   5969  *     - ncpu
   5970  */
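         /*
          * For example (hypothetical numbers): an 82576 (16 hardware queue
          * pairs) attached with 5 MSI-X vectors on an 8-CPU machine ends up
          * with min(16, 5 - 1, 8) = 4 queue pairs.
          */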
   5971 static void
   5972 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5973 {
   5974 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5975 
   5976 	if (nvectors < 2) {
   5977 		sc->sc_nqueues = 1;
   5978 		return;
   5979 	}
   5980 
   5981 	switch (sc->sc_type) {
   5982 	case WM_T_82572:
   5983 		hw_ntxqueues = 2;
   5984 		hw_nrxqueues = 2;
   5985 		break;
   5986 	case WM_T_82574:
   5987 		hw_ntxqueues = 2;
   5988 		hw_nrxqueues = 2;
   5989 		break;
   5990 	case WM_T_82575:
   5991 		hw_ntxqueues = 4;
   5992 		hw_nrxqueues = 4;
   5993 		break;
   5994 	case WM_T_82576:
   5995 		hw_ntxqueues = 16;
   5996 		hw_nrxqueues = 16;
   5997 		break;
   5998 	case WM_T_82580:
   5999 	case WM_T_I350:
   6000 	case WM_T_I354:
   6001 		hw_ntxqueues = 8;
   6002 		hw_nrxqueues = 8;
   6003 		break;
   6004 	case WM_T_I210:
   6005 		hw_ntxqueues = 4;
   6006 		hw_nrxqueues = 4;
   6007 		break;
   6008 	case WM_T_I211:
   6009 		hw_ntxqueues = 2;
   6010 		hw_nrxqueues = 2;
   6011 		break;
   6012 		/*
   6013 		 * The below Ethernet controllers do not support MSI-X;
   6014 		 * this driver doesn't let them use multiqueue.
   6015 		 *     - WM_T_80003
   6016 		 *     - WM_T_ICH8
   6017 		 *     - WM_T_ICH9
   6018 		 *     - WM_T_ICH10
   6019 		 *     - WM_T_PCH
   6020 		 *     - WM_T_PCH2
   6021 		 *     - WM_T_PCH_LPT
   6022 		 */
   6023 	default:
   6024 		hw_ntxqueues = 1;
   6025 		hw_nrxqueues = 1;
   6026 		break;
   6027 	}
   6028 
   6029 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6030 
   6031 	/*
    6032 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    6033 	 * limit the number of queues actually used.
   6034 	 */
   6035 	if (nvectors < hw_nqueues + 1)
   6036 		sc->sc_nqueues = nvectors - 1;
   6037 	else
   6038 		sc->sc_nqueues = hw_nqueues;
   6039 
   6040 	/*
    6041 	 * Since more queues than CPUs cannot improve scaling, we limit
    6042 	 * the number of queues actually used.
   6043 	 */
   6044 	if (ncpu < sc->sc_nqueues)
   6045 		sc->sc_nqueues = ncpu;
   6046 }
   6047 
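         /*
          * wm_is_using_msix:
          *
          *	Return true if the device was attached with more than one
          *	interrupt vector, i.e. it is using MSI-X.
          */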
   6048 static inline bool
   6049 wm_is_using_msix(struct wm_softc *sc)
   6050 {
   6051 
   6052 	return (sc->sc_nintrs > 1);
   6053 }
   6054 
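         /*
          * wm_is_using_multiqueue:
          *
          *	Return true if more than one TX/RX queue pair is in use.
          */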
   6055 static inline bool
   6056 wm_is_using_multiqueue(struct wm_softc *sc)
   6057 {
   6058 
   6059 	return (sc->sc_nqueues > 1);
   6060 }
   6061 
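         /*
          * wm_softint_establish_queue:
          *
          *	Establish the softint handler for one queue and record the
          *	interrupt vector index it belongs to.  On failure, also
          *	disestablish the hardware interrupt for that vector.
          */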
   6062 static int
   6063 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6064 {
   6065 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6066 
   6067 	wmq->wmq_id = qidx;
   6068 	wmq->wmq_intr_idx = intr_idx;
   6069 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6070 	    wm_handle_queue, wmq);
   6071 	if (wmq->wmq_si != NULL)
   6072 		return 0;
   6073 
   6074 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6075 	    wmq->wmq_id);
   6076 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6077 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6078 	return ENOMEM;
   6079 }
   6080 
   6081 /*
    6082  * Both single-interrupt MSI and INTx can use this function.
   6083  */
   6084 static int
   6085 wm_setup_legacy(struct wm_softc *sc)
   6086 {
   6087 	pci_chipset_tag_t pc = sc->sc_pc;
   6088 	const char *intrstr = NULL;
   6089 	char intrbuf[PCI_INTRSTR_LEN];
   6090 	int error;
   6091 
   6092 	error = wm_alloc_txrx_queues(sc);
   6093 	if (error) {
   6094 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6095 		    error);
   6096 		return ENOMEM;
   6097 	}
   6098 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6099 	    sizeof(intrbuf));
   6100 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6101 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6102 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6103 	if (sc->sc_ihs[0] == NULL) {
   6104 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6105 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6106 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6107 		return ENOMEM;
   6108 	}
   6109 
   6110 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6111 	sc->sc_nintrs = 1;
   6112 
   6113 	return wm_softint_establish_queue(sc, 0, 0);
   6114 }
   6115 
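         /*
          * wm_setup_msix:
          *
          *	Set up MSI-X interrupts: one TX/RX vector per queue plus one
          *	vector for link status.  The queue vectors' CPU affinity is
          *	assigned in round-robin fashion.
          */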
   6116 static int
   6117 wm_setup_msix(struct wm_softc *sc)
   6118 {
   6119 	void *vih;
   6120 	kcpuset_t *affinity;
   6121 	int qidx, error, intr_idx, txrx_established;
   6122 	pci_chipset_tag_t pc = sc->sc_pc;
   6123 	const char *intrstr = NULL;
   6124 	char intrbuf[PCI_INTRSTR_LEN];
   6125 	char intr_xname[INTRDEVNAMEBUF];
   6126 
   6127 	if (sc->sc_nqueues < ncpu) {
   6128 		/*
    6129 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6130 		 * interrupts starts from CPU#1.
   6131 		 */
   6132 		sc->sc_affinity_offset = 1;
   6133 	} else {
   6134 		/*
    6135 		 * In this case, this device uses all CPUs, so for readability
    6136 		 * we unify the affinity cpu_index with the MSI-X vector number.
   6137 		 */
   6138 		sc->sc_affinity_offset = 0;
   6139 	}
   6140 
   6141 	error = wm_alloc_txrx_queues(sc);
   6142 	if (error) {
   6143 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6144 		    error);
   6145 		return ENOMEM;
   6146 	}
   6147 
   6148 	kcpuset_create(&affinity, false);
   6149 	intr_idx = 0;
   6150 
   6151 	/*
   6152 	 * TX and RX
   6153 	 */
   6154 	txrx_established = 0;
   6155 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6156 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6157 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6158 
   6159 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6160 		    sizeof(intrbuf));
   6161 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6162 		    PCI_INTR_MPSAFE, true);
   6163 		memset(intr_xname, 0, sizeof(intr_xname));
   6164 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6165 		    device_xname(sc->sc_dev), qidx);
   6166 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6167 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6168 		if (vih == NULL) {
   6169 			aprint_error_dev(sc->sc_dev,
   6170 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6171 			    intrstr ? " at " : "",
   6172 			    intrstr ? intrstr : "");
   6173 
   6174 			goto fail;
   6175 		}
   6176 		kcpuset_zero(affinity);
   6177 		/* Round-robin affinity */
   6178 		kcpuset_set(affinity, affinity_to);
   6179 		error = interrupt_distribute(vih, affinity, NULL);
   6180 		if (error == 0) {
   6181 			aprint_normal_dev(sc->sc_dev,
   6182 			    "for TX and RX interrupting at %s affinity to %u\n",
   6183 			    intrstr, affinity_to);
   6184 		} else {
   6185 			aprint_normal_dev(sc->sc_dev,
   6186 			    "for TX and RX interrupting at %s\n", intrstr);
   6187 		}
   6188 		sc->sc_ihs[intr_idx] = vih;
   6189 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6190 			goto fail;
   6191 		txrx_established++;
   6192 		intr_idx++;
   6193 	}
   6194 
   6195 	/* LINK */
   6196 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6197 	    sizeof(intrbuf));
   6198 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6199 	memset(intr_xname, 0, sizeof(intr_xname));
   6200 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6201 	    device_xname(sc->sc_dev));
   6202 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6203 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6204 	if (vih == NULL) {
   6205 		aprint_error_dev(sc->sc_dev,
   6206 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6207 		    intrstr ? " at " : "",
   6208 		    intrstr ? intrstr : "");
   6209 
   6210 		goto fail;
   6211 	}
    6212 	/* Keep the default affinity for the LINK interrupt. */
   6213 	aprint_normal_dev(sc->sc_dev,
   6214 	    "for LINK interrupting at %s\n", intrstr);
   6215 	sc->sc_ihs[intr_idx] = vih;
   6216 	sc->sc_link_intr_idx = intr_idx;
   6217 
   6218 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6219 	kcpuset_destroy(affinity);
   6220 	return 0;
   6221 
   6222 fail:
   6223 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6224 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6225 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6226 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6227 	}
   6228 
   6229 	kcpuset_destroy(affinity);
   6230 	return ENOMEM;
   6231 }
   6232 
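         /*
          * wm_unset_stopping_flags:
          *
          *	Clear the per-queue and core stopping flags so that packet
          *	processing can run again.
          */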
   6233 static void
   6234 wm_unset_stopping_flags(struct wm_softc *sc)
   6235 {
   6236 	int i;
   6237 
   6238 	KASSERT(mutex_owned(sc->sc_core_lock));
   6239 
   6240 	/* Must unset stopping flags in ascending order. */
   6241 	for (i = 0; i < sc->sc_nqueues; i++) {
   6242 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6243 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6244 
   6245 		mutex_enter(txq->txq_lock);
   6246 		txq->txq_stopping = false;
   6247 		mutex_exit(txq->txq_lock);
   6248 
   6249 		mutex_enter(rxq->rxq_lock);
   6250 		rxq->rxq_stopping = false;
   6251 		mutex_exit(rxq->rxq_lock);
   6252 	}
   6253 
   6254 	sc->sc_core_stopping = false;
   6255 }
   6256 
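         /*
          * wm_set_stopping_flags:
          *
          *	Set the core and per-queue stopping flags to stop packet
          *	processing.
          */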
   6257 static void
   6258 wm_set_stopping_flags(struct wm_softc *sc)
   6259 {
   6260 	int i;
   6261 
   6262 	KASSERT(mutex_owned(sc->sc_core_lock));
   6263 
   6264 	sc->sc_core_stopping = true;
   6265 
   6266 	/* Must set stopping flags in ascending order. */
   6267 	for (i = 0; i < sc->sc_nqueues; i++) {
   6268 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6269 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6270 
   6271 		mutex_enter(rxq->rxq_lock);
   6272 		rxq->rxq_stopping = true;
   6273 		mutex_exit(rxq->rxq_lock);
   6274 
   6275 		mutex_enter(txq->txq_lock);
   6276 		txq->txq_stopping = true;
   6277 		mutex_exit(txq->txq_lock);
   6278 	}
   6279 }
   6280 
   6281 /*
    6282  * Write the interrupt interval value to the ITR or EITR register.
   6283  */
   6284 static void
   6285 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6286 {
   6287 
   6288 	if (!wmq->wmq_set_itr)
   6289 		return;
   6290 
   6291 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6292 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6293 
   6294 		/*
    6295 		 * The 82575 doesn't have the CNT_INGR field,
    6296 		 * so overwrite the counter field in software.
   6297 		 */
   6298 		if (sc->sc_type == WM_T_82575)
   6299 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6300 			    EITR_COUNTER_MASK_82575);
   6301 		else
   6302 			eitr |= EITR_CNT_INGR;
   6303 
   6304 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6305 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6306 		/*
    6307 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6308 		 * the multiqueue function with MSI-X.
   6309 		 */
   6310 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6311 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6312 	} else {
   6313 		KASSERT(wmq->wmq_id == 0);
   6314 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6315 	}
   6316 
   6317 	wmq->wmq_set_itr = false;
   6318 }
   6319 
   6320 /*
   6321  * TODO
    6322  * The dynamic calculation of ITR below is almost the same as Linux igb's;
    6323  * however, it does not fit wm(4) well, so AIM remains disabled until we
    6324  * find an appropriate way to calculate the ITR.
   6325  */
   6326 /*
    6327  * Calculate the interrupt interval value to be written to the register
    6328  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6329  */
   6330 static void
   6331 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6332 {
   6333 #ifdef NOTYET
   6334 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6335 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6336 	uint32_t avg_size = 0;
   6337 	uint32_t new_itr;
   6338 
   6339 	if (rxq->rxq_packets)
   6340 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6341 	if (txq->txq_packets)
   6342 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6343 
   6344 	if (avg_size == 0) {
   6345 		new_itr = 450; /* restore default value */
   6346 		goto out;
   6347 	}
   6348 
   6349 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6350 	avg_size += 24;
   6351 
   6352 	/* Don't starve jumbo frames */
   6353 	avg_size = uimin(avg_size, 3000);
   6354 
   6355 	/* Give a little boost to mid-size frames */
   6356 	if ((avg_size > 300) && (avg_size < 1200))
   6357 		new_itr = avg_size / 3;
   6358 	else
   6359 		new_itr = avg_size / 2;
   6360 
   6361 out:
   6362 	/*
    6363 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6364 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6365 	 */
   6366 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6367 		new_itr *= 4;
   6368 
   6369 	if (new_itr != wmq->wmq_itr) {
   6370 		wmq->wmq_itr = new_itr;
   6371 		wmq->wmq_set_itr = true;
   6372 	} else
   6373 		wmq->wmq_set_itr = false;
   6374 
   6375 	rxq->rxq_packets = 0;
   6376 	rxq->rxq_bytes = 0;
   6377 	txq->txq_packets = 0;
   6378 	txq->txq_bytes = 0;
   6379 #endif
   6380 }
   6381 
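         /*
          * wm_init_sysctls:
          *
          *	Create the sysctl tree for the device: the txrx_workqueue knob,
          *	per-queue TX/RX state for debugging and, under WM_DEBUG, the
          *	debug_flags and trigger_reset nodes.
          */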
   6382 static void
   6383 wm_init_sysctls(struct wm_softc *sc)
   6384 {
   6385 	struct sysctllog **log;
   6386 	const struct sysctlnode *rnode, *qnode, *cnode;
   6387 	int i, rv;
   6388 	const char *dvname;
   6389 
   6390 	log = &sc->sc_sysctllog;
   6391 	dvname = device_xname(sc->sc_dev);
   6392 
   6393 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6394 	    0, CTLTYPE_NODE, dvname,
   6395 	    SYSCTL_DESCR("wm information and settings"),
   6396 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6397 	if (rv != 0)
   6398 		goto err;
   6399 
   6400 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6401 	    CTLTYPE_BOOL, "txrx_workqueue",
   6402 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6403 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6404 	if (rv != 0)
   6405 		goto teardown;
   6406 
   6407 	for (i = 0; i < sc->sc_nqueues; i++) {
   6408 		struct wm_queue *wmq = &sc->sc_queue[i];
   6409 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6410 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6411 
   6412 		snprintf(sc->sc_queue[i].sysctlname,
   6413 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6414 
   6415 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6416 		    0, CTLTYPE_NODE,
   6417 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6418 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6419 			break;
   6420 
   6421 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6422 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6423 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6424 		    NULL, 0, &txq->txq_free,
   6425 		    0, CTL_CREATE, CTL_EOL) != 0)
   6426 			break;
   6427 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6428 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6429 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6430 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6431 		    0, CTL_CREATE, CTL_EOL) != 0)
   6432 			break;
   6433 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6434 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6435 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6436 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6437 		    0, CTL_CREATE, CTL_EOL) != 0)
   6438 			break;
   6439 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6440 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6441 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6442 		    NULL, 0, &txq->txq_next,
   6443 		    0, CTL_CREATE, CTL_EOL) != 0)
   6444 			break;
   6445 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6446 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6447 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6448 		    NULL, 0, &txq->txq_sfree,
   6449 		    0, CTL_CREATE, CTL_EOL) != 0)
   6450 			break;
   6451 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6452 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6453 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6454 		    NULL, 0, &txq->txq_snext,
   6455 		    0, CTL_CREATE, CTL_EOL) != 0)
   6456 			break;
   6457 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6458 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6459 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6460 		    NULL, 0, &txq->txq_sdirty,
   6461 		    0, CTL_CREATE, CTL_EOL) != 0)
   6462 			break;
   6463 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6464 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6465 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6466 		    NULL, 0, &txq->txq_flags,
   6467 		    0, CTL_CREATE, CTL_EOL) != 0)
   6468 			break;
   6469 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6470 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6471 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6472 		    NULL, 0, &txq->txq_stopping,
   6473 		    0, CTL_CREATE, CTL_EOL) != 0)
   6474 			break;
   6475 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6476 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6477 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6478 		    NULL, 0, &txq->txq_sending,
   6479 		    0, CTL_CREATE, CTL_EOL) != 0)
   6480 			break;
   6481 
   6482 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6483 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6484 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6485 		    NULL, 0, &rxq->rxq_ptr,
   6486 		    0, CTL_CREATE, CTL_EOL) != 0)
   6487 			break;
   6488 	}
   6489 
   6490 #ifdef WM_DEBUG
   6491 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6492 	    CTLTYPE_INT, "debug_flags",
   6493 	    SYSCTL_DESCR(
   6494 		    "Debug flags:\n"	\
   6495 		    "\t0x01 LINK\n"	\
   6496 		    "\t0x02 TX\n"	\
   6497 		    "\t0x04 RX\n"	\
   6498 		    "\t0x08 GMII\n"	\
   6499 		    "\t0x10 MANAGE\n"	\
   6500 		    "\t0x20 NVM\n"	\
   6501 		    "\t0x40 INIT\n"	\
   6502 		    "\t0x80 LOCK"),
   6503 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6504 	if (rv != 0)
   6505 		goto teardown;
   6506 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6507 	    CTLTYPE_BOOL, "trigger_reset",
   6508 	    SYSCTL_DESCR("Trigger an interface reset"),
   6509 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6510 	if (rv != 0)
   6511 		goto teardown;
   6512 #endif
   6513 
   6514 	return;
   6515 
   6516 teardown:
   6517 	sysctl_teardown(log);
   6518 err:
   6519 	sc->sc_sysctllog = NULL;
   6520 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6521 	    __func__, rv);
   6522 }
   6523 
   6524 /*
   6525  * wm_init:		[ifnet interface function]
   6526  *
   6527  *	Initialize the interface.
   6528  */
   6529 static int
   6530 wm_init(struct ifnet *ifp)
   6531 {
   6532 	struct wm_softc *sc = ifp->if_softc;
   6533 	int ret;
   6534 
   6535 	KASSERT(IFNET_LOCKED(ifp));
   6536 
   6537 	if (sc->sc_dying)
   6538 		return ENXIO;
   6539 
   6540 	mutex_enter(sc->sc_core_lock);
   6541 	ret = wm_init_locked(ifp);
   6542 	mutex_exit(sc->sc_core_lock);
   6543 
   6544 	return ret;
   6545 }
   6546 
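         /*
          * wm_init_locked:
          *
          *	Initialize the interface with the core lock held: reset the
          *	chip and set up the queues, interrupts, flow control and
          *	receive filter.
          */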
   6547 static int
   6548 wm_init_locked(struct ifnet *ifp)
   6549 {
   6550 	struct wm_softc *sc = ifp->if_softc;
   6551 	struct ethercom *ec = &sc->sc_ethercom;
   6552 	int i, j, trynum, error = 0;
   6553 	uint32_t reg, sfp_mask = 0;
   6554 
   6555 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6556 		device_xname(sc->sc_dev), __func__));
   6557 	KASSERT(IFNET_LOCKED(ifp));
   6558 	KASSERT(mutex_owned(sc->sc_core_lock));
   6559 
   6560 	/*
    6561 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6562 	 * There is a small but measurable benefit to avoiding the adjustment
   6563 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6564 	 * on such platforms.  One possibility is that the DMA itself is
   6565 	 * slightly more efficient if the front of the entire packet (instead
   6566 	 * of the front of the headers) is aligned.
   6567 	 *
   6568 	 * Note we must always set align_tweak to 0 if we are using
   6569 	 * jumbo frames.
   6570 	 */
   6571 #ifdef __NO_STRICT_ALIGNMENT
   6572 	sc->sc_align_tweak = 0;
   6573 #else
   6574 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6575 		sc->sc_align_tweak = 0;
   6576 	else
   6577 		sc->sc_align_tweak = 2;
   6578 #endif /* __NO_STRICT_ALIGNMENT */
   6579 
   6580 	/* Cancel any pending I/O. */
   6581 	wm_stop_locked(ifp, false, false);
   6582 
   6583 	/* Update statistics before reset */
   6584 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6585 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6586 
   6587 	/* >= PCH_SPT hardware workaround before reset. */
   6588 	if (sc->sc_type >= WM_T_PCH_SPT)
   6589 		wm_flush_desc_rings(sc);
   6590 
   6591 	/* Reset the chip to a known state. */
   6592 	wm_reset(sc);
   6593 
   6594 	/*
    6595 	 * AMT-based hardware can now take control from firmware.
   6596 	 * Do this after reset.
   6597 	 */
   6598 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6599 		wm_get_hw_control(sc);
   6600 
   6601 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6602 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6603 		wm_legacy_irq_quirk_spt(sc);
   6604 
   6605 	/* Init hardware bits */
   6606 	wm_initialize_hardware_bits(sc);
   6607 
   6608 	/* Reset the PHY. */
   6609 	if (sc->sc_flags & WM_F_HAS_MII)
   6610 		wm_gmii_reset(sc);
   6611 
   6612 	if (sc->sc_type >= WM_T_ICH8) {
   6613 		reg = CSR_READ(sc, WMREG_GCR);
   6614 		/*
    6615 		 * The ICH8's no-snoop bits have the opposite polarity. Set
    6616 		 * them to snoop by default after reset.
   6617 		 */
   6618 		if (sc->sc_type == WM_T_ICH8)
   6619 			reg |= GCR_NO_SNOOP_ALL;
   6620 		else
   6621 			reg &= ~GCR_NO_SNOOP_ALL;
   6622 		CSR_WRITE(sc, WMREG_GCR, reg);
   6623 	}
   6624 
   6625 	if ((sc->sc_type >= WM_T_ICH8)
   6626 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6627 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6628 
   6629 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6630 		reg |= CTRL_EXT_RO_DIS;
   6631 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6632 	}
   6633 
   6634 	/* Calculate (E)ITR value */
   6635 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6636 		/*
   6637 		 * For NEWQUEUE's EITR (except for 82575).
    6638 		 * The 82575's EITR should be set to the same throttling value
    6639 		 * as the other old controllers' ITR because the interrupt/sec
    6640 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6641 		 *
    6642 		 * The 82574's EITR should be set to the same throttling value as ITR.
   6643 		 *
   6644 		 * For N interrupts/sec, set this value to:
   6645 		 * 1,000,000 / N in contrast to ITR throttling value.
   6646 		 */
   6647 		sc->sc_itr_init = 450;
   6648 	} else if (sc->sc_type >= WM_T_82543) {
   6649 		/*
   6650 		 * Set up the interrupt throttling register (units of 256ns)
   6651 		 * Note that a footnote in Intel's documentation says this
   6652 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6653 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6654 		 * that that is also true for the 1024ns units of the other
   6655 		 * interrupt-related timer registers -- so, really, we ought
   6656 		 * to divide this value by 4 when the link speed is low.
   6657 		 *
   6658 		 * XXX implement this division at link speed change!
   6659 		 */
   6660 
   6661 		/*
   6662 		 * For N interrupts/sec, set this value to:
   6663 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6664 		 * absolute and packet timer values to this value
   6665 		 * divided by 4 to get "simple timer" behavior.
   6666 		 */
   6667 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6668 	}
   6669 
   6670 	error = wm_init_txrx_queues(sc);
   6671 	if (error)
   6672 		goto out;
   6673 
   6674 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6675 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6676 	    (sc->sc_type >= WM_T_82575))
   6677 		wm_serdes_power_up_link_82575(sc);
   6678 
   6679 	/* Clear out the VLAN table -- we don't use it (yet). */
   6680 	CSR_WRITE(sc, WMREG_VET, 0);
   6681 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6682 		trynum = 10; /* Due to hw errata */
   6683 	else
   6684 		trynum = 1;
   6685 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6686 		for (j = 0; j < trynum; j++)
   6687 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6688 
   6689 	/*
   6690 	 * Set up flow-control parameters.
   6691 	 *
   6692 	 * XXX Values could probably stand some tuning.
   6693 	 */
   6694 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6695 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6696 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6697 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6698 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6699 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6700 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6701 	}
   6702 
   6703 	sc->sc_fcrtl = FCRTL_DFLT;
   6704 	if (sc->sc_type < WM_T_82543) {
   6705 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6706 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6707 	} else {
   6708 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6709 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6710 	}
   6711 
   6712 	if (sc->sc_type == WM_T_80003)
   6713 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6714 	else
   6715 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6716 
   6717 	/* Writes the control register. */
   6718 	wm_set_vlan(sc);
   6719 
   6720 	if (sc->sc_flags & WM_F_HAS_MII) {
   6721 		uint16_t kmreg;
   6722 
   6723 		switch (sc->sc_type) {
   6724 		case WM_T_80003:
   6725 		case WM_T_ICH8:
   6726 		case WM_T_ICH9:
   6727 		case WM_T_ICH10:
   6728 		case WM_T_PCH:
   6729 		case WM_T_PCH2:
   6730 		case WM_T_PCH_LPT:
   6731 		case WM_T_PCH_SPT:
   6732 		case WM_T_PCH_CNP:
   6733 			/*
   6734 			 * Set the mac to wait the maximum time between each
   6735 			 * iteration and increase the max iterations when
   6736 			 * polling the phy; this fixes erroneous timeouts at
   6737 			 * 10Mbps.
   6738 			 */
   6739 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6740 			    0xFFFF);
   6741 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6742 			    &kmreg);
   6743 			kmreg |= 0x3F;
   6744 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6745 			    kmreg);
   6746 			break;
   6747 		default:
   6748 			break;
   6749 		}
   6750 
   6751 		if (sc->sc_type == WM_T_80003) {
   6752 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6753 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6754 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6755 
   6756 			/* Bypass RX and TX FIFOs */
   6757 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6758 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6759 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6760 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6761 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6762 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6763 		}
   6764 	}
   6765 #if 0
   6766 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6767 #endif
   6768 
   6769 	/* Set up checksum offload parameters. */
   6770 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6771 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6772 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6773 		reg |= RXCSUM_IPOFL;
   6774 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6775 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6776 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6777 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6778 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6779 
   6780 	/* Set registers about MSI-X */
   6781 	if (wm_is_using_msix(sc)) {
   6782 		uint32_t ivar, qintr_idx;
   6783 		struct wm_queue *wmq;
   6784 		unsigned int qid;
   6785 
   6786 		if (sc->sc_type == WM_T_82575) {
   6787 			/* Interrupt control */
   6788 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6789 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6790 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6791 
   6792 			/* TX and RX */
   6793 			for (i = 0; i < sc->sc_nqueues; i++) {
   6794 				wmq = &sc->sc_queue[i];
   6795 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6796 				    EITR_TX_QUEUE(wmq->wmq_id)
   6797 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6798 			}
   6799 			/* Link status */
   6800 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6801 			    EITR_OTHER);
   6802 		} else if (sc->sc_type == WM_T_82574) {
   6803 			/* Interrupt control */
   6804 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6805 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6806 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6807 
   6808 			/*
    6809 			 * Work around an issue with spurious interrupts
    6810 			 * in MSI-X mode.
    6811 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    6812 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6813 			 */
   6814 			reg = CSR_READ(sc, WMREG_RFCTL);
   6815 			reg |= WMREG_RFCTL_ACKDIS;
   6816 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6817 
   6818 			ivar = 0;
   6819 			/* TX and RX */
   6820 			for (i = 0; i < sc->sc_nqueues; i++) {
   6821 				wmq = &sc->sc_queue[i];
   6822 				qid = wmq->wmq_id;
   6823 				qintr_idx = wmq->wmq_intr_idx;
   6824 
   6825 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6826 				    IVAR_TX_MASK_Q_82574(qid));
   6827 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6828 				    IVAR_RX_MASK_Q_82574(qid));
   6829 			}
   6830 			/* Link status */
   6831 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6832 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6833 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6834 		} else {
   6835 			/* Interrupt control */
   6836 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6837 			    | GPIE_EIAME | GPIE_PBA);
   6838 
   6839 			switch (sc->sc_type) {
   6840 			case WM_T_82580:
   6841 			case WM_T_I350:
   6842 			case WM_T_I354:
   6843 			case WM_T_I210:
   6844 			case WM_T_I211:
   6845 				/* TX and RX */
   6846 				for (i = 0; i < sc->sc_nqueues; i++) {
   6847 					wmq = &sc->sc_queue[i];
   6848 					qid = wmq->wmq_id;
   6849 					qintr_idx = wmq->wmq_intr_idx;
   6850 
   6851 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6852 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6853 					ivar |= __SHIFTIN((qintr_idx
   6854 						| IVAR_VALID),
   6855 					    IVAR_TX_MASK_Q(qid));
   6856 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6857 					ivar |= __SHIFTIN((qintr_idx
   6858 						| IVAR_VALID),
   6859 					    IVAR_RX_MASK_Q(qid));
   6860 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6861 				}
   6862 				break;
   6863 			case WM_T_82576:
   6864 				/* TX and RX */
   6865 				for (i = 0; i < sc->sc_nqueues; i++) {
   6866 					wmq = &sc->sc_queue[i];
   6867 					qid = wmq->wmq_id;
   6868 					qintr_idx = wmq->wmq_intr_idx;
   6869 
   6870 					ivar = CSR_READ(sc,
   6871 					    WMREG_IVAR_Q_82576(qid));
   6872 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6873 					ivar |= __SHIFTIN((qintr_idx
   6874 						| IVAR_VALID),
   6875 					    IVAR_TX_MASK_Q_82576(qid));
   6876 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6877 					ivar |= __SHIFTIN((qintr_idx
   6878 						| IVAR_VALID),
   6879 					    IVAR_RX_MASK_Q_82576(qid));
   6880 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6881 					    ivar);
   6882 				}
   6883 				break;
   6884 			default:
   6885 				break;
   6886 			}
   6887 
   6888 			/* Link status */
   6889 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6890 			    IVAR_MISC_OTHER);
   6891 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6892 		}
   6893 
   6894 		if (wm_is_using_multiqueue(sc)) {
   6895 			wm_init_rss(sc);
   6896 
   6897 			/*
    6898 			 * NOTE: Receive full-packet checksum offload
    6899 			 * is mutually exclusive with multiqueue. However,
    6900 			 * this is not the same as the TCP/IP checksum
    6901 			 * offloads, which still work.
    6902 			 */
   6903 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6904 			reg |= RXCSUM_PCSD;
   6905 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6906 		}
   6907 	}
   6908 
   6909 	/* Set up the interrupt registers. */
   6910 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6911 
    6912 	/* Enable the SFP module insertion interrupt if required. */
   6913 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6914 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6915 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6916 		sfp_mask = ICR_GPI(0);
   6917 	}
   6918 
   6919 	if (wm_is_using_msix(sc)) {
   6920 		uint32_t mask;
   6921 		struct wm_queue *wmq;
   6922 
   6923 		switch (sc->sc_type) {
   6924 		case WM_T_82574:
   6925 			mask = 0;
   6926 			for (i = 0; i < sc->sc_nqueues; i++) {
   6927 				wmq = &sc->sc_queue[i];
   6928 				mask |= ICR_TXQ(wmq->wmq_id);
   6929 				mask |= ICR_RXQ(wmq->wmq_id);
   6930 			}
   6931 			mask |= ICR_OTHER;
   6932 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6933 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6934 			break;
   6935 		default:
   6936 			if (sc->sc_type == WM_T_82575) {
   6937 				mask = 0;
   6938 				for (i = 0; i < sc->sc_nqueues; i++) {
   6939 					wmq = &sc->sc_queue[i];
   6940 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6941 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6942 				}
   6943 				mask |= EITR_OTHER;
   6944 			} else {
   6945 				mask = 0;
   6946 				for (i = 0; i < sc->sc_nqueues; i++) {
   6947 					wmq = &sc->sc_queue[i];
   6948 					mask |= 1 << wmq->wmq_intr_idx;
   6949 				}
   6950 				mask |= 1 << sc->sc_link_intr_idx;
   6951 			}
   6952 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6953 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6954 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6955 
   6956 			/* For other interrupts */
   6957 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6958 			break;
   6959 		}
   6960 	} else {
   6961 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6962 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6963 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6964 	}
   6965 
   6966 	/* Set up the inter-packet gap. */
   6967 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6968 
   6969 	if (sc->sc_type >= WM_T_82543) {
   6970 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6971 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6972 			wm_itrs_writereg(sc, wmq);
   6973 		}
   6974 		/*
    6975 		 * Link interrupts occur much less often than TX
    6976 		 * and RX interrupts, so we don't tune the
    6977 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6978 		 * FreeBSD's if_igb does.
   6979 		 */
   6980 	}
   6981 
   6982 	/* Set the VLAN EtherType. */
   6983 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6984 
   6985 	/*
   6986 	 * Set up the transmit control register; we start out with
   6987 	 * a collision distance suitable for FDX, but update it when
   6988 	 * we resolve the media type.
   6989 	 */
   6990 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6991 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6992 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6993 	if (sc->sc_type >= WM_T_82571)
   6994 		sc->sc_tctl |= TCTL_MULR;
   6995 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6996 
   6997 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6998 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6999 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7000 	}
   7001 
   7002 	if (sc->sc_type == WM_T_80003) {
   7003 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7004 		reg &= ~TCTL_EXT_GCEX_MASK;
   7005 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7006 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7007 	}
   7008 
   7009 	/* Set the media. */
   7010 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7011 		goto out;
   7012 
   7013 	/* Configure for OS presence */
   7014 	wm_init_manageability(sc);
   7015 
   7016 	/*
   7017 	 * Set up the receive control register; we actually program the
   7018 	 * register when we set the receive filter. Use multicast address
   7019 	 * offset type 0.
   7020 	 *
   7021 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7022 	 * don't enable that feature.
   7023 	 */
   7024 	sc->sc_mchash_type = 0;
   7025 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7026 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7027 
    7028 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7029 	if (sc->sc_type == WM_T_82574)
   7030 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7031 
   7032 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7033 		sc->sc_rctl |= RCTL_SECRC;
   7034 
   7035 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7036 	    && (ifp->if_mtu > ETHERMTU)) {
   7037 		sc->sc_rctl |= RCTL_LPE;
   7038 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7039 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7040 	}
   7041 
   7042 	if (MCLBYTES == 2048)
   7043 		sc->sc_rctl |= RCTL_2k;
   7044 	else {
   7045 		if (sc->sc_type >= WM_T_82543) {
   7046 			switch (MCLBYTES) {
   7047 			case 4096:
   7048 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7049 				break;
   7050 			case 8192:
   7051 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7052 				break;
   7053 			case 16384:
   7054 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7055 				break;
   7056 			default:
   7057 				panic("wm_init: MCLBYTES %d unsupported",
   7058 				    MCLBYTES);
   7059 				break;
   7060 			}
   7061 		} else
   7062 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7063 	}
   7064 
   7065 	/* Enable ECC */
   7066 	switch (sc->sc_type) {
   7067 	case WM_T_82571:
   7068 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7069 		reg |= PBA_ECC_CORR_EN;
   7070 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7071 		break;
   7072 	case WM_T_PCH_LPT:
   7073 	case WM_T_PCH_SPT:
   7074 	case WM_T_PCH_CNP:
   7075 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7076 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7077 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7078 
   7079 		sc->sc_ctrl |= CTRL_MEHE;
   7080 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7081 		break;
   7082 	default:
   7083 		break;
   7084 	}
   7085 
   7086 	/*
   7087 	 * Set the receive filter.
   7088 	 *
   7089 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7090 	 * the setting of RCTL.EN in wm_set_filter()
   7091 	 */
   7092 	wm_set_filter(sc);
   7093 
    7094 	/* On 82575 and later, set RDT only if RX is enabled. */
   7095 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7096 		int qidx;
   7097 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7098 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7099 			for (i = 0; i < WM_NRXDESC; i++) {
   7100 				mutex_enter(rxq->rxq_lock);
   7101 				wm_init_rxdesc(rxq, i);
   7102 				mutex_exit(rxq->rxq_lock);
   7103 
   7104 			}
   7105 		}
   7106 	}
   7107 
   7108 	wm_unset_stopping_flags(sc);
   7109 
   7110 	/* Start the one second link check clock. */
   7111 	callout_schedule(&sc->sc_tick_ch, hz);
   7112 
   7113 	/*
   7114 	 * ...all done! (IFNET_LOCKED asserted above.)
   7115 	 */
   7116 	ifp->if_flags |= IFF_RUNNING;
   7117 
   7118 out:
   7119 	/* Save last flags for the callback */
   7120 	sc->sc_if_flags = ifp->if_flags;
   7121 	sc->sc_ec_capenable = ec->ec_capenable;
   7122 	if (error)
   7123 		log(LOG_ERR, "%s: interface not running\n",
   7124 		    device_xname(sc->sc_dev));
   7125 	return error;
   7126 }
   7127 
   7128 /*
   7129  * wm_stop:		[ifnet interface function]
   7130  *
   7131  *	Stop transmission on the interface.
   7132  */
   7133 static void
   7134 wm_stop(struct ifnet *ifp, int disable)
   7135 {
   7136 	struct wm_softc *sc = ifp->if_softc;
   7137 
   7138 	ASSERT_SLEEPABLE();
   7139 	KASSERT(IFNET_LOCKED(ifp));
   7140 
   7141 	mutex_enter(sc->sc_core_lock);
   7142 	wm_stop_locked(ifp, disable ? true : false, true);
   7143 	mutex_exit(sc->sc_core_lock);
   7144 
   7145 	/*
   7146 	 * After wm_set_stopping_flags(), it is guaranteed that
   7147 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7148 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7149 	 * because it can sleep,
    7150 	 * so call workqueue_wait() here.
   7151 	 */
   7152 	for (int i = 0; i < sc->sc_nqueues; i++)
   7153 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7154 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7155 }
   7156 
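         /*
          * wm_stop_locked:
          *
          *	Stop transmission with the core lock held.  If "wait" is true,
          *	wait for the callouts to finish; if "disable" is true, also
          *	drain the RX queues.
          */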
   7157 static void
   7158 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7159 {
   7160 	struct wm_softc *sc = ifp->if_softc;
   7161 	struct wm_txsoft *txs;
   7162 	int i, qidx;
   7163 
   7164 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7165 		device_xname(sc->sc_dev), __func__));
   7166 	KASSERT(IFNET_LOCKED(ifp));
   7167 	KASSERT(mutex_owned(sc->sc_core_lock));
   7168 
   7169 	wm_set_stopping_flags(sc);
   7170 
   7171 	if (sc->sc_flags & WM_F_HAS_MII) {
   7172 		/* Down the MII. */
   7173 		mii_down(&sc->sc_mii);
   7174 	} else {
   7175 #if 0
   7176 		/* Should we clear PHY's status properly? */
   7177 		wm_reset(sc);
   7178 #endif
   7179 	}
   7180 
   7181 	/* Stop the transmit and receive processes. */
   7182 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7183 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7184 	sc->sc_rctl &= ~RCTL_EN;
   7185 
   7186 	/*
   7187 	 * Clear the interrupt mask to ensure the device cannot assert its
   7188 	 * interrupt line.
   7189 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7190 	 * service any currently pending or shared interrupt.
   7191 	 */
   7192 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7193 	sc->sc_icr = 0;
   7194 	if (wm_is_using_msix(sc)) {
   7195 		if (sc->sc_type != WM_T_82574) {
   7196 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7197 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7198 		} else
   7199 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7200 	}
   7201 
   7202 	/*
   7203 	 * Stop callouts after interrupts are disabled; if we have
   7204 	 * to wait for them, we will be releasing the CORE_LOCK
   7205 	 * briefly, which will unblock interrupts on the current CPU.
   7206 	 */
   7207 
   7208 	/* Stop the one second clock. */
   7209 	if (wait)
   7210 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7211 	else
   7212 		callout_stop(&sc->sc_tick_ch);
   7213 
   7214 	/* Stop the 82547 Tx FIFO stall check timer. */
   7215 	if (sc->sc_type == WM_T_82547) {
   7216 		if (wait)
   7217 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7218 		else
   7219 			callout_stop(&sc->sc_txfifo_ch);
   7220 	}
   7221 
   7222 	/* Release any queued transmit buffers. */
   7223 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7224 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7225 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7226 		struct mbuf *m;
   7227 
   7228 		mutex_enter(txq->txq_lock);
   7229 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7230 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7231 			txs = &txq->txq_soft[i];
   7232 			if (txs->txs_mbuf != NULL) {
   7233 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7234 				m_freem(txs->txs_mbuf);
   7235 				txs->txs_mbuf = NULL;
   7236 			}
   7237 		}
   7238 		/* Drain txq_interq */
   7239 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7240 			m_freem(m);
   7241 		mutex_exit(txq->txq_lock);
   7242 	}
   7243 
   7244 	/* Mark the interface as down and cancel the watchdog timer. */
   7245 	ifp->if_flags &= ~IFF_RUNNING;
   7246 	sc->sc_if_flags = ifp->if_flags;
   7247 
   7248 	if (disable) {
   7249 		for (i = 0; i < sc->sc_nqueues; i++) {
   7250 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7251 			mutex_enter(rxq->rxq_lock);
   7252 			wm_rxdrain(rxq);
   7253 			mutex_exit(rxq->rxq_lock);
   7254 		}
   7255 	}
   7256 
   7257 #if 0 /* notyet */
   7258 	if (sc->sc_type >= WM_T_82544)
   7259 		CSR_WRITE(sc, WMREG_WUC, 0);
   7260 #endif
   7261 }
   7262 
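         /*
          * wm_dump_mbuf_chain:
          *
          *	Log the layout of each mbuf in the given chain, for debugging.
          */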
   7263 static void
   7264 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7265 {
   7266 	struct mbuf *m;
   7267 	int i;
   7268 
   7269 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7270 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7271 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7272 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7273 		    m->m_data, m->m_len, m->m_flags);
   7274 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7275 	    i, i == 1 ? "" : "s");
   7276 }
   7277 
   7278 /*
   7279  * wm_82547_txfifo_stall:
   7280  *
   7281  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7282  *	reset the FIFO pointers, and restart packet transmission.
   7283  */
   7284 static void
   7285 wm_82547_txfifo_stall(void *arg)
   7286 {
   7287 	struct wm_softc *sc = arg;
   7288 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7289 
   7290 	mutex_enter(txq->txq_lock);
   7291 
   7292 	if (txq->txq_stopping)
   7293 		goto out;
   7294 
   7295 	if (txq->txq_fifo_stall) {
   7296 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7297 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7298 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7299 			/*
   7300 			 * Packets have drained.  Stop transmitter, reset
   7301 			 * FIFO pointers, restart transmitter, and kick
   7302 			 * the packet queue.
   7303 			 */
   7304 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7305 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7306 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7307 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7308 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7309 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7310 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7311 			CSR_WRITE_FLUSH(sc);
   7312 
   7313 			txq->txq_fifo_head = 0;
   7314 			txq->txq_fifo_stall = 0;
   7315 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7316 		} else {
   7317 			/*
   7318 			 * Still waiting for packets to drain; try again in
   7319 			 * another tick.
   7320 			 */
   7321 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7322 		}
   7323 	}
   7324 
   7325 out:
   7326 	mutex_exit(txq->txq_lock);
   7327 }
   7328 
   7329 /*
   7330  * wm_82547_txfifo_bugchk:
   7331  *
   7332  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7333  *	prevent enqueueing a packet that would wrap around the end
    7334  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7335  *
   7336  *	We do this by checking the amount of space before the end
   7337  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7338  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7339  *	the internal FIFO pointers to the beginning, and restart
   7340  *	transmission on the interface.
   7341  */
   7342 #define	WM_FIFO_HDR		0x10
   7343 #define	WM_82547_PAD_LEN	0x3e0
   7344 static int
   7345 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7346 {
   7347 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7348 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7349 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7350 
   7351 	/* Just return if already stalled. */
   7352 	if (txq->txq_fifo_stall)
   7353 		return 1;
   7354 
   7355 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7356 		/* Stall only occurs in half-duplex mode. */
   7357 		goto send_packet;
   7358 	}
   7359 
   7360 	if (len >= WM_82547_PAD_LEN + space) {
   7361 		txq->txq_fifo_stall = 1;
   7362 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7363 		return 1;
   7364 	}
   7365 
   7366 send_packet:
   7367 	txq->txq_fifo_head += len;
   7368 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7369 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7370 
   7371 	return 0;
   7372 }
   7373 
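         /*
          * wm_alloc_tx_descs:
          *
          *	Allocate, map and load the DMA memory for the TX descriptor
          *	ring.
          */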
   7374 static int
   7375 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7376 {
   7377 	int error;
   7378 
   7379 	/*
   7380 	 * Allocate the control data structures, and create and load the
   7381 	 * DMA map for it.
   7382 	 *
   7383 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7384 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7385 	 * both sets within the same 4G segment.
   7386 	 */
   7387 	if (sc->sc_type < WM_T_82544)
   7388 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7389 	else
   7390 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7391 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7392 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7393 	else
   7394 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7395 
   7396 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7397 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7398 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7399 		aprint_error_dev(sc->sc_dev,
   7400 		    "unable to allocate TX control data, error = %d\n",
   7401 		    error);
   7402 		goto fail_0;
   7403 	}
   7404 
   7405 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7406 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7407 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7408 		aprint_error_dev(sc->sc_dev,
   7409 		    "unable to map TX control data, error = %d\n", error);
   7410 		goto fail_1;
   7411 	}
   7412 
   7413 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7414 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7415 		aprint_error_dev(sc->sc_dev,
   7416 		    "unable to create TX control data DMA map, error = %d\n",
   7417 		    error);
   7418 		goto fail_2;
   7419 	}
   7420 
   7421 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7422 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7423 		aprint_error_dev(sc->sc_dev,
   7424 		    "unable to load TX control data DMA map, error = %d\n",
   7425 		    error);
   7426 		goto fail_3;
   7427 	}
   7428 
   7429 	return 0;
   7430 
   7431 fail_3:
   7432 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7433 fail_2:
   7434 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7435 	    WM_TXDESCS_SIZE(txq));
   7436 fail_1:
   7437 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7438 fail_0:
   7439 	return error;
   7440 }
   7441 
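         /*
          * wm_free_tx_descs:
          *
          *	Free the DMA resources of the TX descriptor ring.
          */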
   7442 static void
   7443 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7444 {
   7445 
   7446 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7447 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7448 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7449 	    WM_TXDESCS_SIZE(txq));
   7450 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7451 }
   7452 
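         /*
          * wm_alloc_rx_descs:
          *
          *	Allocate, map and load the DMA memory for the RX descriptor
          *	ring.  The descriptor size depends on the controller type.
          */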
   7453 static int
   7454 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7455 {
   7456 	int error;
   7457 	size_t rxq_descs_size;
   7458 
   7459 	/*
   7460 	 * Allocate the control data structures, and create and load the
   7461 	 * DMA map for it.
   7462 	 *
   7463 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7464 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7465 	 * both sets within the same 4G segment.
   7466 	 */
   7467 	rxq->rxq_ndesc = WM_NRXDESC;
   7468 	if (sc->sc_type == WM_T_82574)
   7469 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7470 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7471 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7472 	else
   7473 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7474 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7475 
   7476 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7477 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7478 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7479 		aprint_error_dev(sc->sc_dev,
   7480 		    "unable to allocate RX control data, error = %d\n",
   7481 		    error);
   7482 		goto fail_0;
   7483 	}
   7484 
   7485 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7486 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7487 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7488 		aprint_error_dev(sc->sc_dev,
   7489 		    "unable to map RX control data, error = %d\n", error);
   7490 		goto fail_1;
   7491 	}
   7492 
   7493 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7494 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7495 		aprint_error_dev(sc->sc_dev,
   7496 		    "unable to create RX control data DMA map, error = %d\n",
   7497 		    error);
   7498 		goto fail_2;
   7499 	}
   7500 
   7501 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7502 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7503 		aprint_error_dev(sc->sc_dev,
   7504 		    "unable to load RX control data DMA map, error = %d\n",
   7505 		    error);
   7506 		goto fail_3;
   7507 	}
   7508 
   7509 	return 0;
   7510 
   7511  fail_3:
   7512 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7513  fail_2:
   7514 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7515 	    rxq_descs_size);
   7516  fail_1:
   7517 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7518  fail_0:
   7519 	return error;
   7520 }
   7521 
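         /*
          * wm_free_rx_descs:
          *
          *	Free the DMA resources of the RX descriptor ring.
          */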
   7522 static void
   7523 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7524 {
   7525 
   7526 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7527 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7528 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7529 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7530 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7531 }
   7532 
   7533 
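         /*
          * wm_alloc_tx_buffer:
          *
          *	Create the DMA maps for the TX software descriptors.
          */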
   7534 static int
   7535 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7536 {
   7537 	int i, error;
   7538 
   7539 	/* Create the transmit buffer DMA maps. */
   7540 	WM_TXQUEUELEN(txq) =
   7541 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7542 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7543 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7544 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7545 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7546 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7547 			aprint_error_dev(sc->sc_dev,
   7548 			    "unable to create Tx DMA map %d, error = %d\n",
   7549 			    i, error);
   7550 			goto fail;
   7551 		}
   7552 	}
   7553 
   7554 	return 0;
   7555 
   7556 fail:
   7557 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7558 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7559 			bus_dmamap_destroy(sc->sc_dmat,
   7560 			    txq->txq_soft[i].txs_dmamap);
   7561 	}
   7562 	return error;
   7563 }
   7564 
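         /*
          * wm_free_tx_buffer:
          *
          *	Destroy the DMA maps of the TX software descriptors.
          */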
   7565 static void
   7566 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7567 {
   7568 	int i;
   7569 
   7570 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7571 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7572 			bus_dmamap_destroy(sc->sc_dmat,
   7573 			    txq->txq_soft[i].txs_dmamap);
   7574 	}
   7575 }
   7576 
   7577 static int
   7578 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7579 {
   7580 	int i, error;
   7581 
   7582 	/* Create the receive buffer DMA maps. */
   7583 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7584 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7585 			    MCLBYTES, 0, 0,
   7586 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7587 			aprint_error_dev(sc->sc_dev,
   7588 			    "unable to create Rx DMA map %d error = %d\n",
   7589 			    i, error);
   7590 			goto fail;
   7591 		}
   7592 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7593 	}
   7594 
   7595 	return 0;
   7596 
   7597  fail:
   7598 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7599 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7600 			bus_dmamap_destroy(sc->sc_dmat,
   7601 			    rxq->rxq_soft[i].rxs_dmamap);
   7602 	}
   7603 	return error;
   7604 }
   7605 
   7606 static void
   7607 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7608 {
   7609 	int i;
   7610 
   7611 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7612 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7613 			bus_dmamap_destroy(sc->sc_dmat,
   7614 			    rxq->rxq_soft[i].rxs_dmamap);
   7615 	}
   7616 }
   7617 
   7618 /*
   7619  * wm_alloc_txrx_queues:
   7620  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7621  */
   7622 static int
   7623 wm_alloc_txrx_queues(struct wm_softc *sc)
   7624 {
   7625 	int i, error, tx_done, rx_done;
   7626 
   7627 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7628 	    KM_SLEEP);
   7629 	if (sc->sc_queue == NULL) {
   7630 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7631 		error = ENOMEM;
   7632 		goto fail_0;
   7633 	}
   7634 
   7635 	/* For transmission */
   7636 	error = 0;
   7637 	tx_done = 0;
   7638 	for (i = 0; i < sc->sc_nqueues; i++) {
   7639 #ifdef WM_EVENT_COUNTERS
   7640 		int j;
   7641 		const char *xname;
   7642 #endif
   7643 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7644 		txq->txq_sc = sc;
   7645 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7646 
   7647 		error = wm_alloc_tx_descs(sc, txq);
   7648 		if (error)
   7649 			break;
   7650 		error = wm_alloc_tx_buffer(sc, txq);
   7651 		if (error) {
   7652 			wm_free_tx_descs(sc, txq);
   7653 			break;
   7654 		}
   7655 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7656 		if (txq->txq_interq == NULL) {
   7657 			wm_free_tx_descs(sc, txq);
   7658 			wm_free_tx_buffer(sc, txq);
   7659 			error = ENOMEM;
   7660 			break;
   7661 		}
   7662 
   7663 #ifdef WM_EVENT_COUNTERS
   7664 		xname = device_xname(sc->sc_dev);
   7665 
   7666 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7667 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7668 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7669 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7670 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7671 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7672 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7673 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7674 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7675 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7676 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7677 
   7678 		for (j = 0; j < WM_NTXSEGS; j++) {
   7679 			snprintf(txq->txq_txseg_evcnt_names[j],
   7680 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7681 			    "txq%02dtxseg%d", i, j);
   7682 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7683 			    EVCNT_TYPE_MISC,
   7684 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7685 		}
   7686 
   7687 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7688 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7689 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7690 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7691 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7692 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7693 #endif /* WM_EVENT_COUNTERS */
   7694 
   7695 		tx_done++;
   7696 	}
   7697 	if (error)
   7698 		goto fail_1;
   7699 
   7700 	/* For receive */
   7701 	error = 0;
   7702 	rx_done = 0;
   7703 	for (i = 0; i < sc->sc_nqueues; i++) {
   7704 #ifdef WM_EVENT_COUNTERS
   7705 		const char *xname;
   7706 #endif
   7707 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7708 		rxq->rxq_sc = sc;
   7709 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7710 
   7711 		error = wm_alloc_rx_descs(sc, rxq);
   7712 		if (error)
   7713 			break;
   7714 
   7715 		error = wm_alloc_rx_buffer(sc, rxq);
   7716 		if (error) {
   7717 			wm_free_rx_descs(sc, rxq);
   7718 			break;
   7719 		}
   7720 
   7721 #ifdef WM_EVENT_COUNTERS
   7722 		xname = device_xname(sc->sc_dev);
   7723 
   7724 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7725 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7726 
   7727 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7728 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7729 #endif /* WM_EVENT_COUNTERS */
   7730 
   7731 		rx_done++;
   7732 	}
   7733 	if (error)
   7734 		goto fail_2;
   7735 
   7736 	return 0;
   7737 
   7738 fail_2:
   7739 	for (i = 0; i < rx_done; i++) {
   7740 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7741 		wm_free_rx_buffer(sc, rxq);
   7742 		wm_free_rx_descs(sc, rxq);
   7743 		if (rxq->rxq_lock)
   7744 			mutex_obj_free(rxq->rxq_lock);
   7745 	}
   7746 fail_1:
   7747 	for (i = 0; i < tx_done; i++) {
   7748 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7749 		pcq_destroy(txq->txq_interq);
   7750 		wm_free_tx_buffer(sc, txq);
   7751 		wm_free_tx_descs(sc, txq);
   7752 		if (txq->txq_lock)
   7753 			mutex_obj_free(txq->txq_lock);
   7754 	}
   7755 
   7756 	kmem_free(sc->sc_queue,
   7757 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7758 fail_0:
   7759 	return error;
   7760 }
   7761 
   7762 /*
   7763  * wm_free_txrx_queues:
   7764  *	Free {tx,rx}descs and {tx,rx} buffers
   7765  */
   7766 static void
   7767 wm_free_txrx_queues(struct wm_softc *sc)
   7768 {
   7769 	int i;
   7770 
   7771 	for (i = 0; i < sc->sc_nqueues; i++) {
   7772 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7773 
   7774 #ifdef WM_EVENT_COUNTERS
   7775 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7776 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7777 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7778 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7779 #endif /* WM_EVENT_COUNTERS */
   7780 
   7781 		wm_free_rx_buffer(sc, rxq);
   7782 		wm_free_rx_descs(sc, rxq);
   7783 		if (rxq->rxq_lock)
   7784 			mutex_obj_free(rxq->rxq_lock);
   7785 	}
   7786 
   7787 	for (i = 0; i < sc->sc_nqueues; i++) {
   7788 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7789 		struct mbuf *m;
   7790 #ifdef WM_EVENT_COUNTERS
   7791 		int j;
   7792 
   7793 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7794 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7795 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7796 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7797 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7798 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7799 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7800 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7801 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7802 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7803 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7804 
   7805 		for (j = 0; j < WM_NTXSEGS; j++)
   7806 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7807 
   7808 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7809 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7810 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7811 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7812 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7813 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7814 #endif /* WM_EVENT_COUNTERS */
   7815 
   7816 		/* Drain txq_interq */
   7817 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7818 			m_freem(m);
   7819 		pcq_destroy(txq->txq_interq);
   7820 
   7821 		wm_free_tx_buffer(sc, txq);
   7822 		wm_free_tx_descs(sc, txq);
   7823 		if (txq->txq_lock)
   7824 			mutex_obj_free(txq->txq_lock);
   7825 	}
   7826 
   7827 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7828 }
   7829 
   7830 static void
   7831 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7832 {
   7833 
   7834 	KASSERT(mutex_owned(txq->txq_lock));
   7835 
   7836 	/* Initialize the transmit descriptor ring. */
   7837 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7838 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7839 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7840 	txq->txq_free = WM_NTXDESC(txq);
   7841 	txq->txq_next = 0;
   7842 }
   7843 
   7844 static void
   7845 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7846     struct wm_txqueue *txq)
   7847 {
   7848 
   7849 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7850 		device_xname(sc->sc_dev), __func__));
   7851 	KASSERT(mutex_owned(txq->txq_lock));
   7852 
   7853 	if (sc->sc_type < WM_T_82543) {
   7854 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7855 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7856 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7857 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7858 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7859 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7860 	} else {
   7861 		int qid = wmq->wmq_id;
   7862 
   7863 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7864 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7865 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7866 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7867 
   7868 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7869 			/*
   7870 			 * Don't write TDT before TCTL.EN is set.
   7871 			 * See the document.
   7872 			 * See the datasheet.
   7873 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7874 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7875 			    | TXDCTL_WTHRESH(0));
   7876 		else {
   7877 			/* XXX should update with AIM? */
   7878 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7879 			if (sc->sc_type >= WM_T_82540) {
   7880 				/* Should be the same */
   7881 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7882 			}
   7883 
   7884 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7885 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7886 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7887 		}
   7888 	}
   7889 }
   7890 
   7891 static void
   7892 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7893 {
   7894 	int i;
   7895 
   7896 	KASSERT(mutex_owned(txq->txq_lock));
   7897 
   7898 	/* Initialize the transmit job descriptors. */
   7899 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7900 		txq->txq_soft[i].txs_mbuf = NULL;
   7901 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7902 	txq->txq_snext = 0;
   7903 	txq->txq_sdirty = 0;
   7904 }
   7905 
   7906 static void
   7907 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7908     struct wm_txqueue *txq)
   7909 {
   7910 
   7911 	KASSERT(mutex_owned(txq->txq_lock));
   7912 
   7913 	/*
   7914 	 * Set up some register offsets that are different between
   7915 	 * the i82542 and the i82543 and later chips.
   7916 	 */
   7917 	if (sc->sc_type < WM_T_82543)
   7918 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7919 	else
   7920 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7921 
   7922 	wm_init_tx_descs(sc, txq);
   7923 	wm_init_tx_regs(sc, wmq, txq);
   7924 	wm_init_tx_buffer(sc, txq);
   7925 
   7926 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   7927 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7928 
   7929 	txq->txq_sending = false;
   7930 }
   7931 
   7932 static void
   7933 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7934     struct wm_rxqueue *rxq)
   7935 {
   7936 
   7937 	KASSERT(mutex_owned(rxq->rxq_lock));
   7938 
   7939 	/*
   7940 	 * Initialize the receive descriptor and receive job
   7941 	 * descriptor rings.
   7942 	 */
   7943 	if (sc->sc_type < WM_T_82543) {
   7944 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7945 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7946 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7947 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7948 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7949 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7950 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7951 
   7952 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7953 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7954 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7955 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7956 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7957 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7958 	} else {
   7959 		int qid = wmq->wmq_id;
   7960 
   7961 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7962 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7963 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7964 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7965 
   7966 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7967 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7968 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7969 				    "or higher\n", __func__, MCLBYTES);
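        			/*
        			 * SRRCTL.BSIZEPKT is in units of
        			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so
        			 * MCLBYTES must be a multiple of that
        			 * granularity; with the usual 1 KB units (a
        			 * shift of 10), a 2048-byte cluster encodes
        			 * as 2.
        			 */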
   7970 
   7971 			/*
   7972 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7973 			 * only.
   7974 			 */
   7975 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7976 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7977 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7978 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7979 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7980 			    | RXDCTL_WTHRESH(1));
   7981 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7982 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7983 		} else {
   7984 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7985 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7986 			/* XXX should update with AIM? */
   7987 			CSR_WRITE(sc, WMREG_RDTR,
   7988 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7989 			/* MUST be same */
   7990 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7991 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7992 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7993 		}
   7994 	}
   7995 }
   7996 
   7997 static int
   7998 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7999 {
   8000 	struct wm_rxsoft *rxs;
   8001 	int error, i;
   8002 
   8003 	KASSERT(mutex_owned(rxq->rxq_lock));
   8004 
   8005 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8006 		rxs = &rxq->rxq_soft[i];
   8007 		if (rxs->rxs_mbuf == NULL) {
   8008 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8009 				log(LOG_ERR, "%s: unable to allocate or map "
   8010 				    "rx buffer %d, error = %d\n",
   8011 				    device_xname(sc->sc_dev), i, error);
   8012 				/*
   8013 				 * XXX Should attempt to run with fewer receive
   8014 				 * XXX buffers instead of just failing.
   8015 				 */
   8016 				wm_rxdrain(rxq);
   8017 				return ENOMEM;
   8018 			}
   8019 		} else {
   8020 			/*
   8021 			 * For 82575 and 82576, the RX descriptors must be
   8022 			 * initialized after the setting of RCTL.EN in
   8023 			 * wm_set_filter().
   8024 			 */
   8025 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8026 				wm_init_rxdesc(rxq, i);
   8027 		}
   8028 	}
   8029 	rxq->rxq_ptr = 0;
   8030 	rxq->rxq_discard = 0;
   8031 	WM_RXCHAIN_RESET(rxq);
   8032 
   8033 	return 0;
   8034 }
   8035 
   8036 static int
   8037 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8038     struct wm_rxqueue *rxq)
   8039 {
   8040 
   8041 	KASSERT(mutex_owned(rxq->rxq_lock));
   8042 
   8043 	/*
   8044 	 * Set up some register offsets that are different between
   8045 	 * the i82542 and the i82543 and later chips.
   8046 	 */
   8047 	if (sc->sc_type < WM_T_82543)
   8048 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8049 	else
   8050 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8051 
   8052 	wm_init_rx_regs(sc, wmq, rxq);
   8053 	return wm_init_rx_buffer(sc, rxq);
   8054 }
   8055 
   8056 /*
   8057  * wm_init_txrx_queues:
   8058  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8059  */
   8060 static int
   8061 wm_init_txrx_queues(struct wm_softc *sc)
   8062 {
   8063 	int i, error = 0;
   8064 
   8065 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8066 		device_xname(sc->sc_dev), __func__));
   8067 
   8068 	for (i = 0; i < sc->sc_nqueues; i++) {
   8069 		struct wm_queue *wmq = &sc->sc_queue[i];
   8070 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8071 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8072 
   8073 		/*
   8074 		 * TODO
   8075 		 * Currently, use a constant value instead of AIM.
   8076 		 * Furthermore, the interrupt interval of multiqueue, which
   8077 		 * uses polling mode, is less than the default value.
   8078 		 * More tuning and AIM support are required.
   8079 		 */
   8080 		if (wm_is_using_multiqueue(sc))
   8081 			wmq->wmq_itr = 50;
   8082 		else
   8083 			wmq->wmq_itr = sc->sc_itr_init;
   8084 		wmq->wmq_set_itr = true;
   8085 
   8086 		mutex_enter(txq->txq_lock);
   8087 		wm_init_tx_queue(sc, wmq, txq);
   8088 		mutex_exit(txq->txq_lock);
   8089 
   8090 		mutex_enter(rxq->rxq_lock);
   8091 		error = wm_init_rx_queue(sc, wmq, rxq);
   8092 		mutex_exit(rxq->rxq_lock);
   8093 		if (error)
   8094 			break;
   8095 	}
   8096 
   8097 	return error;
   8098 }
   8099 
   8100 /*
   8101  * wm_tx_offload:
   8102  *
   8103  *	Set up TCP/IP checksumming parameters for the
   8104  *	specified packet.
   8105  */
   8106 static void
   8107 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8108     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8109 {
   8110 	struct mbuf *m0 = txs->txs_mbuf;
   8111 	struct livengood_tcpip_ctxdesc *t;
   8112 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8113 	uint32_t ipcse;
   8114 	struct ether_header *eh;
   8115 	int offset, iphl;
   8116 	uint8_t fields;
   8117 
   8118 	/*
   8119 	 * XXX It would be nice if the mbuf pkthdr had offset
   8120 	 * fields for the protocol headers.
   8121 	 */
   8122 
   8123 	eh = mtod(m0, struct ether_header *);
   8124 	switch (htons(eh->ether_type)) {
   8125 	case ETHERTYPE_IP:
   8126 	case ETHERTYPE_IPV6:
   8127 		offset = ETHER_HDR_LEN;
   8128 		break;
   8129 
   8130 	case ETHERTYPE_VLAN:
   8131 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8132 		break;
   8133 
   8134 	default:
   8135 		/* Don't support this protocol or encapsulation. */
   8136 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8137 		txq->txq_last_hw_ipcs = 0;
   8138 		txq->txq_last_hw_tucs = 0;
   8139 		*fieldsp = 0;
   8140 		*cmdp = 0;
   8141 		return;
   8142 	}
   8143 
   8144 	if ((m0->m_pkthdr.csum_flags &
   8145 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8146 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8147 	} else
   8148 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8149 
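        	/*
        	 * IPCSE is the inclusive offset of the last byte of the IP
        	 * header; e.g. a plain Ethernet frame (offset 14) with a
        	 * 20-byte IPv4 header gives ipcse = 14 + 20 - 1 = 33.
        	 */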
   8150 	ipcse = offset + iphl - 1;
   8151 
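        	/*
        	 * cmd seeds the extended data descriptors (DTYP_D), while
        	 * cmdlen describes the context descriptor itself (DTYP_C).
        	 */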
   8152 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8153 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8154 	seg = 0;
   8155 	fields = 0;
   8156 
   8157 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8158 		int hlen = offset + iphl;
   8159 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8160 
   8161 		if (__predict_false(m0->m_len <
   8162 				    (hlen + sizeof(struct tcphdr)))) {
   8163 			/*
   8164 			 * TCP/IP headers are not in the first mbuf; we need
   8165 			 * to do this the slow and painful way. Let's just
   8166 			 * hope this doesn't happen very often.
   8167 			 */
   8168 			struct tcphdr th;
   8169 
   8170 			WM_Q_EVCNT_INCR(txq, tsopain);
   8171 
   8172 			m_copydata(m0, hlen, sizeof(th), &th);
   8173 			if (v4) {
   8174 				struct ip ip;
   8175 
   8176 				m_copydata(m0, offset, sizeof(ip), &ip);
   8177 				ip.ip_len = 0;
   8178 				m_copyback(m0,
   8179 				    offset + offsetof(struct ip, ip_len),
   8180 				    sizeof(ip.ip_len), &ip.ip_len);
   8181 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8182 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8183 			} else {
   8184 				struct ip6_hdr ip6;
   8185 
   8186 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8187 				ip6.ip6_plen = 0;
   8188 				m_copyback(m0,
   8189 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8190 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8191 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8192 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8193 			}
   8194 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8195 			    sizeof(th.th_sum), &th.th_sum);
   8196 
   8197 			hlen += th.th_off << 2;
   8198 		} else {
   8199 			/*
   8200 			 * TCP/IP headers are in the first mbuf; we can do
   8201 			 * this the easy way.
   8202 			 */
   8203 			struct tcphdr *th;
   8204 
   8205 			if (v4) {
   8206 				struct ip *ip =
   8207 				    (void *)(mtod(m0, char *) + offset);
   8208 				th = (void *)(mtod(m0, char *) + hlen);
   8209 
   8210 				ip->ip_len = 0;
   8211 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8212 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8213 			} else {
   8214 				struct ip6_hdr *ip6 =
   8215 				    (void *)(mtod(m0, char *) + offset);
   8216 				th = (void *)(mtod(m0, char *) + hlen);
   8217 
   8218 				ip6->ip6_plen = 0;
   8219 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8220 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8221 			}
   8222 			hlen += th->th_off << 2;
   8223 		}
   8224 
   8225 		if (v4) {
   8226 			WM_Q_EVCNT_INCR(txq, tso);
   8227 			cmdlen |= WTX_TCPIP_CMD_IP;
   8228 		} else {
   8229 			WM_Q_EVCNT_INCR(txq, tso6);
   8230 			ipcse = 0;
   8231 		}
   8232 		cmd |= WTX_TCPIP_CMD_TSE;
   8233 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8234 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8235 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8236 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8237 	}
   8238 
   8239 	/*
   8240 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8241 	 * offload feature, if we load the context descriptor, we
   8242 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8243 	 */
   8244 
   8245 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8246 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8247 	    WTX_TCPIP_IPCSE(ipcse);
   8248 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8249 		WM_Q_EVCNT_INCR(txq, ipsum);
   8250 		fields |= WTX_IXSM;
   8251 	}
   8252 
   8253 	offset += iphl;
   8254 
   8255 	if (m0->m_pkthdr.csum_flags &
   8256 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8257 		WM_Q_EVCNT_INCR(txq, tusum);
   8258 		fields |= WTX_TXSM;
   8259 		tucs = WTX_TCPIP_TUCSS(offset) |
   8260 		    WTX_TCPIP_TUCSO(offset +
   8261 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8262 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8263 	} else if ((m0->m_pkthdr.csum_flags &
   8264 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8265 		WM_Q_EVCNT_INCR(txq, tusum6);
   8266 		fields |= WTX_TXSM;
   8267 		tucs = WTX_TCPIP_TUCSS(offset) |
   8268 		    WTX_TCPIP_TUCSO(offset +
   8269 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8270 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8271 	} else {
   8272 		/* Just initialize it to a valid TCP context. */
   8273 		tucs = WTX_TCPIP_TUCSS(offset) |
   8274 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8275 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8276 	}
   8277 
   8278 	*cmdp = cmd;
   8279 	*fieldsp = fields;
   8280 
   8281 	/*
   8282 	 * We don't have to write a context descriptor for every packet,
   8283 	 * except on the 82574. On the 82574, we must write a context
   8284 	 * descriptor for every packet when we use two descriptor queues.
   8285 	 *
   8286 	 * The 82574L can only remember the *last* context used
   8287 	 * regardless of the queue it was used for.  We cannot reuse
   8288 	 * contexts on this hardware platform and must generate a new
   8289 	 * context every time.  See the 82574L hardware spec, section
   8290 	 * 7.2.6, second note.
   8291 	 */
   8292 	if (sc->sc_nqueues < 2) {
   8293 		/*
   8294 		 * Setting up a new checksum offload context for every
   8295 		 * frame takes a lot of processing time for hardware.
   8296 		 * This also reduces performance a lot for small sized
   8297 		 * frames, so avoid it if the driver can use a previously
   8298 		 * configured checksum offload context.
   8299 		 * For TSO, in theory we can use the same TSO context only if
   8300 		 * the frame is the same type (IP/TCP) and has the same MSS.
   8301 		 * However, checking whether a frame has the same IP/TCP
   8302 		 * structure is hard, so just ignore that and always
   8303 		 * re-establish a new TSO context.
   8304 		 */
   8305 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8306 		    == 0) {
   8307 			if (txq->txq_last_hw_cmd == cmd &&
   8308 			    txq->txq_last_hw_fields == fields &&
   8309 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8310 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8311 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8312 				return;
   8313 			}
   8314 		}
   8315 
   8316 		txq->txq_last_hw_cmd = cmd;
   8317 		txq->txq_last_hw_fields = fields;
   8318 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8319 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8320 	}
   8321 
   8322 	/* Fill in the context descriptor. */
   8323 	t = (struct livengood_tcpip_ctxdesc *)
   8324 	    &txq->txq_descs[txq->txq_next];
   8325 	t->tcpip_ipcs = htole32(ipcs);
   8326 	t->tcpip_tucs = htole32(tucs);
   8327 	t->tcpip_cmdlen = htole32(cmdlen);
   8328 	t->tcpip_seg = htole32(seg);
   8329 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8330 
   8331 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8332 	txs->txs_ndesc++;
   8333 }
   8334 
   8335 static inline int
   8336 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8337 {
   8338 	struct wm_softc *sc = ifp->if_softc;
   8339 	u_int cpuid = cpu_index(curcpu());
   8340 
   8341 	/*
   8342 	 * Currently, a simple distribution strategy.
   8343 	 * TODO:
   8344 	 * distribute by flowid (RSS hash value).
   8345 	 */
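        	/*
        	 * For example, with ncpu = 8, sc_affinity_offset = 2 and
        	 * sc_nqueues = 4, CPU 5 maps to ((5 + 8 - 2) % 8) % 4 = 3.
        	 */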
   8346 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8347 }
   8348 
   8349 static inline bool
   8350 wm_linkdown_discard(struct wm_txqueue *txq)
   8351 {
   8352 
   8353 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8354 		return true;
   8355 
   8356 	return false;
   8357 }
   8358 
   8359 /*
   8360  * wm_start:		[ifnet interface function]
   8361  *
   8362  *	Start packet transmission on the interface.
   8363  */
   8364 static void
   8365 wm_start(struct ifnet *ifp)
   8366 {
   8367 	struct wm_softc *sc = ifp->if_softc;
   8368 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8369 
   8370 	KASSERT(if_is_mpsafe(ifp));
   8371 	/*
   8372 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8373 	 */
   8374 
   8375 	mutex_enter(txq->txq_lock);
   8376 	if (!txq->txq_stopping)
   8377 		wm_start_locked(ifp);
   8378 	mutex_exit(txq->txq_lock);
   8379 }
   8380 
   8381 static void
   8382 wm_start_locked(struct ifnet *ifp)
   8383 {
   8384 	struct wm_softc *sc = ifp->if_softc;
   8385 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8386 
   8387 	wm_send_common_locked(ifp, txq, false);
   8388 }
   8389 
   8390 static int
   8391 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8392 {
   8393 	int qid;
   8394 	struct wm_softc *sc = ifp->if_softc;
   8395 	struct wm_txqueue *txq;
   8396 
   8397 	qid = wm_select_txqueue(ifp, m);
   8398 	txq = &sc->sc_queue[qid].wmq_txq;
   8399 
   8400 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8401 		m_freem(m);
   8402 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8403 		return ENOBUFS;
   8404 	}
   8405 
   8406 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8407 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8408 	if (m->m_flags & M_MCAST)
   8409 		if_statinc_ref(nsr, if_omcasts);
   8410 	IF_STAT_PUTREF(ifp);
   8411 
   8412 	if (mutex_tryenter(txq->txq_lock)) {
   8413 		if (!txq->txq_stopping)
   8414 			wm_transmit_locked(ifp, txq);
   8415 		mutex_exit(txq->txq_lock);
   8416 	}
   8417 
   8418 	return 0;
   8419 }
   8420 
   8421 static void
   8422 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8423 {
   8424 
   8425 	wm_send_common_locked(ifp, txq, true);
   8426 }
   8427 
   8428 static void
   8429 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8430     bool is_transmit)
   8431 {
   8432 	struct wm_softc *sc = ifp->if_softc;
   8433 	struct mbuf *m0;
   8434 	struct wm_txsoft *txs;
   8435 	bus_dmamap_t dmamap;
   8436 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8437 	bus_addr_t curaddr;
   8438 	bus_size_t seglen, curlen;
   8439 	uint32_t cksumcmd;
   8440 	uint8_t cksumfields;
   8441 	bool remap = true;
   8442 
   8443 	KASSERT(mutex_owned(txq->txq_lock));
   8444 	KASSERT(!txq->txq_stopping);
   8445 
   8446 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8447 		return;
   8448 
   8449 	if (__predict_false(wm_linkdown_discard(txq))) {
   8450 		do {
   8451 			if (is_transmit)
   8452 				m0 = pcq_get(txq->txq_interq);
   8453 			else
   8454 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8455 			/*
   8456 			 * Increment the sent-packet counter even though the
   8457 			 * packet is discarded by the link-down PHY.
   8458 			 */
   8459 			if (m0 != NULL) {
   8460 				if_statinc(ifp, if_opackets);
   8461 				m_freem(m0);
   8462 			}
   8463 		} while (m0 != NULL);
   8464 		return;
   8465 	}
   8466 
   8467 	/* Remember the previous number of free descriptors. */
   8468 	ofree = txq->txq_free;
   8469 
   8470 	/*
   8471 	 * Loop through the send queue, setting up transmit descriptors
   8472 	 * until we drain the queue, or use up all available transmit
   8473 	 * descriptors.
   8474 	 */
   8475 	for (;;) {
   8476 		m0 = NULL;
   8477 
   8478 		/* Get a work queue entry. */
   8479 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8480 			wm_txeof(txq, UINT_MAX);
   8481 			if (txq->txq_sfree == 0) {
   8482 				DPRINTF(sc, WM_DEBUG_TX,
   8483 				    ("%s: TX: no free job descriptors\n",
   8484 					device_xname(sc->sc_dev)));
   8485 				WM_Q_EVCNT_INCR(txq, txsstall);
   8486 				break;
   8487 			}
   8488 		}
   8489 
   8490 		/* Grab a packet off the queue. */
   8491 		if (is_transmit)
   8492 			m0 = pcq_get(txq->txq_interq);
   8493 		else
   8494 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8495 		if (m0 == NULL)
   8496 			break;
   8497 
   8498 		DPRINTF(sc, WM_DEBUG_TX,
   8499 		    ("%s: TX: have packet to transmit: %p\n",
   8500 			device_xname(sc->sc_dev), m0));
   8501 
   8502 		txs = &txq->txq_soft[txq->txq_snext];
   8503 		dmamap = txs->txs_dmamap;
   8504 
   8505 		use_tso = (m0->m_pkthdr.csum_flags &
   8506 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8507 
   8508 		/*
   8509 		 * So says the Linux driver:
   8510 		 * The controller does a simple calculation to make sure
   8511 		 * there is enough room in the FIFO before initiating the
   8512 		 * DMA for each buffer. The calc is:
   8513 		 *	4 = ceil(buffer len / MSS)
   8514 		 * To make sure we don't overrun the FIFO, adjust the max
   8515 		 * buffer len if the MSS drops.
   8516 		 */
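        		/*
        		 * For example, an MSS of 1460 would cap each DMA
        		 * segment at 1460 << 2 = 5840 bytes (assuming that is
        		 * below WTX_MAX_LEN).
        		 */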
   8517 		dmamap->dm_maxsegsz =
   8518 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8519 		    ? m0->m_pkthdr.segsz << 2
   8520 		    : WTX_MAX_LEN;
   8521 
   8522 		/*
   8523 		 * Load the DMA map.  If this fails, the packet either
   8524 		 * didn't fit in the allotted number of segments, or we
   8525 		 * were short on resources.  For the too-many-segments
   8526 		 * case, we simply report an error and drop the packet,
   8527 		 * since we can't sanely copy a jumbo packet to a single
   8528 		 * buffer.
   8529 		 */
   8530 retry:
   8531 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8532 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8533 		if (__predict_false(error)) {
   8534 			if (error == EFBIG) {
   8535 				if (remap == true) {
   8536 					struct mbuf *m;
   8537 
   8538 					remap = false;
   8539 					m = m_defrag(m0, M_NOWAIT);
   8540 					if (m != NULL) {
   8541 						WM_Q_EVCNT_INCR(txq, defrag);
   8542 						m0 = m;
   8543 						goto retry;
   8544 					}
   8545 				}
   8546 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8547 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8548 				    "DMA segments, dropping...\n",
   8549 				    device_xname(sc->sc_dev));
   8550 				wm_dump_mbuf_chain(sc, m0);
   8551 				m_freem(m0);
   8552 				continue;
   8553 			}
   8554 			/* Short on resources, just stop for now. */
   8555 			DPRINTF(sc, WM_DEBUG_TX,
   8556 			    ("%s: TX: dmamap load failed: %d\n",
   8557 				device_xname(sc->sc_dev), error));
   8558 			break;
   8559 		}
   8560 
   8561 		segs_needed = dmamap->dm_nsegs;
   8562 		if (use_tso) {
   8563 			/* For sentinel descriptor; see below. */
   8564 			segs_needed++;
   8565 		}
   8566 
   8567 		/*
   8568 		 * Ensure we have enough descriptors free to describe
   8569 		 * the packet. Note, we always reserve one descriptor
   8570 		 * at the end of the ring due to the semantics of the
   8571 		 * TDT register, plus one more in the event we need
   8572 		 * to load offload context.
   8573 		 */
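        		/*
        		 * E.g. a packet needing 5 descriptors may only proceed
        		 * while txq_free >= 7.
        		 */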
   8574 		if (segs_needed > txq->txq_free - 2) {
   8575 			/*
   8576 			 * Not enough free descriptors to transmit this
   8577 			 * packet.  We haven't committed anything yet,
   8578 			 * so just unload the DMA map, put the packet
   8579 			 * back on the queue, and punt. Notify the upper
   8580 			 * layer that there are no more slots left.
   8581 			 */
   8582 			DPRINTF(sc, WM_DEBUG_TX,
   8583 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8584 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8585 				segs_needed, txq->txq_free - 1));
   8586 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8587 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8588 			WM_Q_EVCNT_INCR(txq, txdstall);
   8589 			break;
   8590 		}
   8591 
   8592 		/*
   8593 		 * Check for 82547 Tx FIFO bug. We need to do this
   8594 		 * once we know we can transmit the packet, since we
   8595 		 * do some internal FIFO space accounting here.
   8596 		 */
   8597 		if (sc->sc_type == WM_T_82547 &&
   8598 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8599 			DPRINTF(sc, WM_DEBUG_TX,
   8600 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8601 				device_xname(sc->sc_dev)));
   8602 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8603 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8604 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8605 			break;
   8606 		}
   8607 
   8608 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8609 
   8610 		DPRINTF(sc, WM_DEBUG_TX,
   8611 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8612 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8613 
   8614 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8615 
   8616 		/*
   8617 		 * Store a pointer to the packet so that we can free it
   8618 		 * later.
   8619 		 *
   8620 		 * Initially, we consider the number of descriptors the
   8621 		 * packet uses to be the number of DMA segments.  This may be
   8622 		 * incremented by 1 if we do checksum offload (a descriptor
   8623 		 * is used to set the checksum context).
   8624 		 */
   8625 		txs->txs_mbuf = m0;
   8626 		txs->txs_firstdesc = txq->txq_next;
   8627 		txs->txs_ndesc = segs_needed;
   8628 
   8629 		/* Set up offload parameters for this packet. */
   8630 		if (m0->m_pkthdr.csum_flags &
   8631 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8632 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8633 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8634 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8635 		} else {
   8636 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8637 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8638 			cksumcmd = 0;
   8639 			cksumfields = 0;
   8640 		}
   8641 
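        		/*
        		 * Every data descriptor requests an interrupt delay
        		 * (IDE) and hardware FCS insertion (IFCS).
        		 */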
   8642 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8643 
   8644 		/* Sync the DMA map. */
   8645 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8646 		    BUS_DMASYNC_PREWRITE);
   8647 
   8648 		/* Initialize the transmit descriptor. */
   8649 		for (nexttx = txq->txq_next, seg = 0;
   8650 		     seg < dmamap->dm_nsegs; seg++) {
   8651 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8652 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8653 			     seglen != 0;
   8654 			     curaddr += curlen, seglen -= curlen,
   8655 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8656 				curlen = seglen;
   8657 
   8658 				/*
   8659 				 * So says the Linux driver:
   8660 				 * Work around for premature descriptor
   8661 				 * write-backs in TSO mode.  Append a
   8662 				 * 4-byte sentinel descriptor.
   8663 				 */
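        				/*
        				 * The 4 bytes shaved off below become
        				 * their own (sentinel) descriptor on
        				 * the next loop iteration.
        				 */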
   8664 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8665 				    curlen > 8)
   8666 					curlen -= 4;
   8667 
   8668 				wm_set_dma_addr(
   8669 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8670 				txq->txq_descs[nexttx].wtx_cmdlen
   8671 				    = htole32(cksumcmd | curlen);
   8672 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8673 				    = 0;
   8674 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8675 				    = cksumfields;
   8676 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8677 				lasttx = nexttx;
   8678 
   8679 				DPRINTF(sc, WM_DEBUG_TX,
   8680 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8681 					"len %#04zx\n",
   8682 					device_xname(sc->sc_dev), nexttx,
   8683 					(uint64_t)curaddr, curlen));
   8684 			}
   8685 		}
   8686 
   8687 		KASSERT(lasttx != -1);
   8688 
   8689 		/*
   8690 		 * Set up the command byte on the last descriptor of
   8691 		 * the packet. If we're in the interrupt delay window,
   8692 		 * delay the interrupt.
   8693 		 */
   8694 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8695 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8696 
   8697 		/*
   8698 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8699 		 * up the descriptor to encapsulate the packet for us.
   8700 		 *
   8701 		 * This is only valid on the last descriptor of the packet.
   8702 		 */
   8703 		if (vlan_has_tag(m0)) {
   8704 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8705 			    htole32(WTX_CMD_VLE);
   8706 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8707 			    = htole16(vlan_get_tag(m0));
   8708 		}
   8709 
   8710 		txs->txs_lastdesc = lasttx;
   8711 
   8712 		DPRINTF(sc, WM_DEBUG_TX,
   8713 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8714 			device_xname(sc->sc_dev),
   8715 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8716 
   8717 		/* Sync the descriptors we're using. */
   8718 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8719 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8720 
   8721 		/* Give the packet to the chip. */
   8722 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8723 
   8724 		DPRINTF(sc, WM_DEBUG_TX,
   8725 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8726 
   8727 		DPRINTF(sc, WM_DEBUG_TX,
   8728 		    ("%s: TX: finished transmitting packet, job %d\n",
   8729 			device_xname(sc->sc_dev), txq->txq_snext));
   8730 
   8731 		/* Advance the tx pointer. */
   8732 		txq->txq_free -= txs->txs_ndesc;
   8733 		txq->txq_next = nexttx;
   8734 
   8735 		txq->txq_sfree--;
   8736 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8737 
   8738 		/* Pass the packet to any BPF listeners. */
   8739 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8740 	}
   8741 
   8742 	if (m0 != NULL) {
   8743 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8744 		WM_Q_EVCNT_INCR(txq, descdrop);
   8745 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8746 			__func__));
   8747 		m_freem(m0);
   8748 	}
   8749 
   8750 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8751 		/* No more slots; notify upper layer. */
   8752 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8753 	}
   8754 
   8755 	if (txq->txq_free != ofree) {
   8756 		/* Set a watchdog timer in case the chip flakes out. */
   8757 		txq->txq_lastsent = time_uptime;
   8758 		txq->txq_sending = true;
   8759 	}
   8760 }
   8761 
   8762 /*
   8763  * wm_nq_tx_offload:
   8764  *
   8765  *	Set up TCP/IP checksumming parameters for the
   8766  *	specified packet, for NEWQUEUE devices
   8767  */
   8768 static void
   8769 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8770     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8771 {
   8772 	struct mbuf *m0 = txs->txs_mbuf;
   8773 	uint32_t vl_len, mssidx, cmdc;
   8774 	struct ether_header *eh;
   8775 	int offset, iphl;
   8776 
   8777 	/*
   8778 	 * XXX It would be nice if the mbuf pkthdr had offset
   8779 	 * fields for the protocol headers.
   8780 	 */
   8781 	*cmdlenp = 0;
   8782 	*fieldsp = 0;
   8783 
   8784 	eh = mtod(m0, struct ether_header *);
   8785 	switch (htons(eh->ether_type)) {
   8786 	case ETHERTYPE_IP:
   8787 	case ETHERTYPE_IPV6:
   8788 		offset = ETHER_HDR_LEN;
   8789 		break;
   8790 
   8791 	case ETHERTYPE_VLAN:
   8792 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8793 		break;
   8794 
   8795 	default:
   8796 		/* Don't support this protocol or encapsulation. */
   8797 		*do_csum = false;
   8798 		return;
   8799 	}
   8800 	*do_csum = true;
   8801 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8802 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8803 
   8804 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8805 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8806 
   8807 	if ((m0->m_pkthdr.csum_flags &
   8808 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8809 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8810 	} else {
   8811 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8812 	}
   8813 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8814 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8815 
   8816 	if (vlan_has_tag(m0)) {
   8817 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8818 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8819 		*cmdlenp |= NQTX_CMD_VLE;
   8820 	}
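        	/*
        	 * vl_len now packs the VLAN tag, MAC header length and IP
        	 * header length into the context descriptor's single VLLEN
        	 * word; e.g. an untagged IPv4 frame carries MACLEN 14 and
        	 * IPLEN 20.
        	 */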
   8821 
   8822 	mssidx = 0;
   8823 
   8824 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8825 		int hlen = offset + iphl;
   8826 		int tcp_hlen;
   8827 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8828 
   8829 		if (__predict_false(m0->m_len <
   8830 				    (hlen + sizeof(struct tcphdr)))) {
   8831 			/*
   8832 			 * TCP/IP headers are not in the first mbuf; we need
   8833 			 * to do this the slow and painful way. Let's just
   8834 			 * hope this doesn't happen very often.
   8835 			 */
   8836 			struct tcphdr th;
   8837 
   8838 			WM_Q_EVCNT_INCR(txq, tsopain);
   8839 
   8840 			m_copydata(m0, hlen, sizeof(th), &th);
   8841 			if (v4) {
   8842 				struct ip ip;
   8843 
   8844 				m_copydata(m0, offset, sizeof(ip), &ip);
   8845 				ip.ip_len = 0;
   8846 				m_copyback(m0,
   8847 				    offset + offsetof(struct ip, ip_len),
   8848 				    sizeof(ip.ip_len), &ip.ip_len);
   8849 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8850 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8851 			} else {
   8852 				struct ip6_hdr ip6;
   8853 
   8854 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8855 				ip6.ip6_plen = 0;
   8856 				m_copyback(m0,
   8857 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8858 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8859 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8860 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8861 			}
   8862 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8863 			    sizeof(th.th_sum), &th.th_sum);
   8864 
   8865 			tcp_hlen = th.th_off << 2;
   8866 		} else {
   8867 			/*
   8868 			 * TCP/IP headers are in the first mbuf; we can do
   8869 			 * this the easy way.
   8870 			 */
   8871 			struct tcphdr *th;
   8872 
   8873 			if (v4) {
   8874 				struct ip *ip =
   8875 				    (void *)(mtod(m0, char *) + offset);
   8876 				th = (void *)(mtod(m0, char *) + hlen);
   8877 
   8878 				ip->ip_len = 0;
   8879 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8880 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8881 			} else {
   8882 				struct ip6_hdr *ip6 =
   8883 				    (void *)(mtod(m0, char *) + offset);
   8884 				th = (void *)(mtod(m0, char *) + hlen);
   8885 
   8886 				ip6->ip6_plen = 0;
   8887 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8888 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8889 			}
   8890 			tcp_hlen = th->th_off << 2;
   8891 		}
   8892 		hlen += tcp_hlen;
   8893 		*cmdlenp |= NQTX_CMD_TSE;
   8894 
   8895 		if (v4) {
   8896 			WM_Q_EVCNT_INCR(txq, tso);
   8897 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8898 		} else {
   8899 			WM_Q_EVCNT_INCR(txq, tso6);
   8900 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8901 		}
   8902 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8903 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8904 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8905 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8906 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8907 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8908 	} else {
   8909 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8910 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8911 	}
   8912 
   8913 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8914 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8915 		cmdc |= NQTXC_CMD_IP4;
   8916 	}
   8917 
   8918 	if (m0->m_pkthdr.csum_flags &
   8919 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8920 		WM_Q_EVCNT_INCR(txq, tusum);
   8921 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8922 			cmdc |= NQTXC_CMD_TCP;
   8923 		else
   8924 			cmdc |= NQTXC_CMD_UDP;
   8925 
   8926 		cmdc |= NQTXC_CMD_IP4;
   8927 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8928 	}
   8929 	if (m0->m_pkthdr.csum_flags &
   8930 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8931 		WM_Q_EVCNT_INCR(txq, tusum6);
   8932 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8933 			cmdc |= NQTXC_CMD_TCP;
   8934 		else
   8935 			cmdc |= NQTXC_CMD_UDP;
   8936 
   8937 		cmdc |= NQTXC_CMD_IP6;
   8938 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8939 	}
   8940 
   8941 	/*
   8942 	 * We don't have to write a context descriptor for every packet on
   8943 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   8944 	 * I354, I210 and I211. Writing once per Tx queue is enough for
   8945 	 * these controllers.
   8946 	 * Writing a context descriptor for every packet adds overhead,
   8947 	 * but it does not cause problems.
   8948 	 */
   8949 	/* Fill in the context descriptor. */
   8950 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   8951 	    htole32(vl_len);
   8952 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   8953 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   8954 	    htole32(cmdc);
   8955 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   8956 	    htole32(mssidx);
   8957 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8958 	DPRINTF(sc, WM_DEBUG_TX,
   8959 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8960 		txq->txq_next, 0, vl_len));
   8961 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8962 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8963 	txs->txs_ndesc++;
   8964 }
   8965 
   8966 /*
   8967  * wm_nq_start:		[ifnet interface function]
   8968  *
   8969  *	Start packet transmission on the interface for NEWQUEUE devices
   8970  */
   8971 static void
   8972 wm_nq_start(struct ifnet *ifp)
   8973 {
   8974 	struct wm_softc *sc = ifp->if_softc;
   8975 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8976 
   8977 	KASSERT(if_is_mpsafe(ifp));
   8978 	/*
   8979 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8980 	 */
   8981 
   8982 	mutex_enter(txq->txq_lock);
   8983 	if (!txq->txq_stopping)
   8984 		wm_nq_start_locked(ifp);
   8985 	mutex_exit(txq->txq_lock);
   8986 }
   8987 
   8988 static void
   8989 wm_nq_start_locked(struct ifnet *ifp)
   8990 {
   8991 	struct wm_softc *sc = ifp->if_softc;
   8992 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8993 
   8994 	wm_nq_send_common_locked(ifp, txq, false);
   8995 }
   8996 
   8997 static int
   8998 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8999 {
   9000 	int qid;
   9001 	struct wm_softc *sc = ifp->if_softc;
   9002 	struct wm_txqueue *txq;
   9003 
   9004 	qid = wm_select_txqueue(ifp, m);
   9005 	txq = &sc->sc_queue[qid].wmq_txq;
   9006 
   9007 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9008 		m_freem(m);
   9009 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9010 		return ENOBUFS;
   9011 	}
   9012 
   9013 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9014 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9015 	if (m->m_flags & M_MCAST)
   9016 		if_statinc_ref(nsr, if_omcasts);
   9017 	IF_STAT_PUTREF(ifp);
   9018 
   9019 	/*
   9020 	 * The situations which this mutex_tryenter() fails at running time
   9021 	 * are below two patterns.
   9022 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   9023 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   9024 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   9025 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   9026 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   9027 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   9028 	 * stuck, either.
   9029 	 */
   9030 	if (mutex_tryenter(txq->txq_lock)) {
   9031 		if (!txq->txq_stopping)
   9032 			wm_nq_transmit_locked(ifp, txq);
   9033 		mutex_exit(txq->txq_lock);
   9034 	}
   9035 
   9036 	return 0;
   9037 }
   9038 
   9039 static void
   9040 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9041 {
   9042 
   9043 	wm_nq_send_common_locked(ifp, txq, true);
   9044 }
   9045 
   9046 static void
   9047 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9048     bool is_transmit)
   9049 {
   9050 	struct wm_softc *sc = ifp->if_softc;
   9051 	struct mbuf *m0;
   9052 	struct wm_txsoft *txs;
   9053 	bus_dmamap_t dmamap;
   9054 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9055 	bool do_csum, sent;
   9056 	bool remap = true;
   9057 
   9058 	KASSERT(mutex_owned(txq->txq_lock));
   9059 	KASSERT(!txq->txq_stopping);
   9060 
   9061 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9062 		return;
   9063 
   9064 	if (__predict_false(wm_linkdown_discard(txq))) {
   9065 		do {
   9066 			if (is_transmit)
   9067 				m0 = pcq_get(txq->txq_interq);
   9068 			else
   9069 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9070 			/*
   9071 			 * Increment the sent-packet counter even though the
   9072 			 * packet is discarded by the link-down PHY.
   9073 			 */
   9074 			if (m0 != NULL) {
   9075 				if_statinc(ifp, if_opackets);
   9076 				m_freem(m0);
   9077 			}
   9078 		} while (m0 != NULL);
   9079 		return;
   9080 	}
   9081 
   9082 	sent = false;
   9083 
   9084 	/*
   9085 	 * Loop through the send queue, setting up transmit descriptors
   9086 	 * until we drain the queue, or use up all available transmit
   9087 	 * descriptors.
   9088 	 */
   9089 	for (;;) {
   9090 		m0 = NULL;
   9091 
   9092 		/* Get a work queue entry. */
   9093 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9094 			wm_txeof(txq, UINT_MAX);
   9095 			if (txq->txq_sfree == 0) {
   9096 				DPRINTF(sc, WM_DEBUG_TX,
   9097 				    ("%s: TX: no free job descriptors\n",
   9098 					device_xname(sc->sc_dev)));
   9099 				WM_Q_EVCNT_INCR(txq, txsstall);
   9100 				break;
   9101 			}
   9102 		}
   9103 
   9104 		/* Grab a packet off the queue. */
   9105 		if (is_transmit)
   9106 			m0 = pcq_get(txq->txq_interq);
   9107 		else
   9108 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9109 		if (m0 == NULL)
   9110 			break;
   9111 
   9112 		DPRINTF(sc, WM_DEBUG_TX,
   9113 		    ("%s: TX: have packet to transmit: %p\n",
   9114 			device_xname(sc->sc_dev), m0));
   9115 
   9116 		txs = &txq->txq_soft[txq->txq_snext];
   9117 		dmamap = txs->txs_dmamap;
   9118 
   9119 		/*
   9120 		 * Load the DMA map.  If this fails, the packet either
   9121 		 * didn't fit in the allotted number of segments, or we
   9122 		 * were short on resources.  For the too-many-segments
   9123 		 * case, we simply report an error and drop the packet,
   9124 		 * since we can't sanely copy a jumbo packet to a single
   9125 		 * buffer.
   9126 		 */
   9127 retry:
   9128 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9129 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9130 		if (__predict_false(error)) {
   9131 			if (error == EFBIG) {
   9132 				if (remap == true) {
   9133 					struct mbuf *m;
   9134 
   9135 					remap = false;
   9136 					m = m_defrag(m0, M_NOWAIT);
   9137 					if (m != NULL) {
   9138 						WM_Q_EVCNT_INCR(txq, defrag);
   9139 						m0 = m;
   9140 						goto retry;
   9141 					}
   9142 				}
   9143 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9144 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9145 				    "DMA segments, dropping...\n",
   9146 				    device_xname(sc->sc_dev));
   9147 				wm_dump_mbuf_chain(sc, m0);
   9148 				m_freem(m0);
   9149 				continue;
   9150 			}
   9151 			/* Short on resources, just stop for now. */
   9152 			DPRINTF(sc, WM_DEBUG_TX,
   9153 			    ("%s: TX: dmamap load failed: %d\n",
   9154 				device_xname(sc->sc_dev), error));
   9155 			break;
   9156 		}
   9157 
   9158 		segs_needed = dmamap->dm_nsegs;
   9159 
   9160 		/*
   9161 		 * Ensure we have enough descriptors free to describe
   9162 		 * the packet. Note, we always reserve one descriptor
   9163 		 * at the end of the ring due to the semantics of the
   9164 		 * TDT register, plus one more in the event we need
   9165 		 * to load offload context.
   9166 		 */
   9167 		if (segs_needed > txq->txq_free - 2) {
   9168 			/*
   9169 			 * Not enough free descriptors to transmit this
   9170 			 * packet.  We haven't committed anything yet,
   9171 			 * so just unload the DMA map, put the packet
   9172 			 * back on the queue, and punt. Notify the upper
   9173 			 * layer that there are no more slots left.
   9174 			 */
   9175 			DPRINTF(sc, WM_DEBUG_TX,
   9176 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9177 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9178 				segs_needed, txq->txq_free - 1));
   9179 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9180 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9181 			WM_Q_EVCNT_INCR(txq, txdstall);
   9182 			break;
   9183 		}
   9184 
   9185 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9186 
   9187 		DPRINTF(sc, WM_DEBUG_TX,
   9188 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9189 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9190 
   9191 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9192 
   9193 		/*
   9194 		 * Store a pointer to the packet so that we can free it
   9195 		 * later.
   9196 		 *
   9197 		 * Initially, we consider the number of descriptors the
   9198 		 * packet uses to be the number of DMA segments.  This may be
   9199 		 * incremented by 1 if we do checksum offload (a descriptor
   9200 		 * is used to set the checksum context).
   9201 		 */
   9202 		txs->txs_mbuf = m0;
   9203 		txs->txs_firstdesc = txq->txq_next;
   9204 		txs->txs_ndesc = segs_needed;
   9205 
   9206 		/* Set up offload parameters for this packet. */
   9207 		uint32_t cmdlen, fields, dcmdlen;
   9208 		if (m0->m_pkthdr.csum_flags &
   9209 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9210 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9211 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9212 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9213 			    &do_csum);
   9214 		} else {
   9215 			do_csum = false;
   9216 			cmdlen = 0;
   9217 			fields = 0;
   9218 		}
   9219 
   9220 		/* Sync the DMA map. */
   9221 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9222 		    BUS_DMASYNC_PREWRITE);
   9223 
   9224 		/* Initialize the first transmit descriptor. */
   9225 		nexttx = txq->txq_next;
   9226 		if (!do_csum) {
   9227 			/* Set up a legacy descriptor */
   9228 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9229 			    dmamap->dm_segs[0].ds_addr);
   9230 			txq->txq_descs[nexttx].wtx_cmdlen =
   9231 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9232 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9233 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9234 			if (vlan_has_tag(m0)) {
   9235 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9236 				    htole32(WTX_CMD_VLE);
   9237 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9238 				    htole16(vlan_get_tag(m0));
   9239 			} else
    9240 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9241 
   9242 			dcmdlen = 0;
   9243 		} else {
   9244 			/* Set up an advanced data descriptor */
   9245 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9246 			    htole64(dmamap->dm_segs[0].ds_addr);
   9247 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9248 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9249 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9250 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9251 			    htole32(fields);
   9252 			DPRINTF(sc, WM_DEBUG_TX,
   9253 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9254 				device_xname(sc->sc_dev), nexttx,
   9255 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9256 			DPRINTF(sc, WM_DEBUG_TX,
   9257 			    ("\t 0x%08x%08x\n", fields,
   9258 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9259 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9260 		}
   9261 
   9262 		lasttx = nexttx;
   9263 		nexttx = WM_NEXTTX(txq, nexttx);
   9264 		/*
    9265 		 * Fill in the next descriptors. The legacy and advanced
    9266 		 * formats are the same here.
   9267 		 */
   9268 		for (seg = 1; seg < dmamap->dm_nsegs;
   9269 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9270 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9271 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9272 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9273 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9274 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9275 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9276 			lasttx = nexttx;
   9277 
   9278 			DPRINTF(sc, WM_DEBUG_TX,
   9279 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9280 				device_xname(sc->sc_dev), nexttx,
   9281 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9282 				dmamap->dm_segs[seg].ds_len));
   9283 		}
   9284 
   9285 		KASSERT(lasttx != -1);
   9286 
   9287 		/*
   9288 		 * Set up the command byte on the last descriptor of
   9289 		 * the packet. If we're in the interrupt delay window,
   9290 		 * delay the interrupt.
   9291 		 */
   9292 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9293 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9294 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9295 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9296 
   9297 		txs->txs_lastdesc = lasttx;
   9298 
   9299 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9300 		    device_xname(sc->sc_dev),
   9301 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9302 
   9303 		/* Sync the descriptors we're using. */
   9304 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9305 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9306 
   9307 		/* Give the packet to the chip. */
   9308 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9309 		sent = true;
   9310 
   9311 		DPRINTF(sc, WM_DEBUG_TX,
   9312 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9313 
   9314 		DPRINTF(sc, WM_DEBUG_TX,
   9315 		    ("%s: TX: finished transmitting packet, job %d\n",
   9316 			device_xname(sc->sc_dev), txq->txq_snext));
   9317 
   9318 		/* Advance the tx pointer. */
   9319 		txq->txq_free -= txs->txs_ndesc;
   9320 		txq->txq_next = nexttx;
   9321 
   9322 		txq->txq_sfree--;
   9323 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9324 
   9325 		/* Pass the packet to any BPF listeners. */
   9326 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9327 	}
   9328 
   9329 	if (m0 != NULL) {
   9330 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9331 		WM_Q_EVCNT_INCR(txq, descdrop);
   9332 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9333 			__func__));
   9334 		m_freem(m0);
   9335 	}
   9336 
   9337 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9338 		/* No more slots; notify upper layer. */
   9339 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9340 	}
   9341 
   9342 	if (sent) {
   9343 		/* Set a watchdog timer in case the chip flakes out. */
   9344 		txq->txq_lastsent = time_uptime;
   9345 		txq->txq_sending = true;
   9346 	}
   9347 }
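
/*
 * Illustrative sketch, not driver code: the free-descriptor arithmetic
 * used by the transmit loop above.  One slot is always kept unused so
 * the TDT (tail) register never catches up with the head, and one more
 * is reserved in case a checksum/TSO context descriptor must be
 * emitted, hence the "txq_free - 2" test.  All names below are
 * hypothetical.
 */
#if 0
struct tx_ring {
	unsigned int size;	/* total descriptors in the ring */
	unsigned int free;	/* descriptors currently unused */
};

/* Modular increment, as WM_NEXTTX() does for the real ring. */
static inline unsigned int
ring_next(const struct tx_ring *r, unsigned int idx)
{

	return (idx + 1) % r->size;
}

/* Room check matching "segs_needed > txq->txq_free - 2" above. */
static inline int
ring_has_room(const struct tx_ring *r, unsigned int segs_needed)
{

	return segs_needed <= r->free - 2;
}
#endif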
   9348 
   9349 static void
   9350 wm_deferred_start_locked(struct wm_txqueue *txq)
   9351 {
   9352 	struct wm_softc *sc = txq->txq_sc;
   9353 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9354 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9355 	int qid = wmq->wmq_id;
   9356 
   9357 	KASSERT(mutex_owned(txq->txq_lock));
   9358 	KASSERT(!txq->txq_stopping);
   9359 
   9360 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9361 		/* XXX needed for ALTQ or single-CPU systems */
   9362 		if (qid == 0)
   9363 			wm_nq_start_locked(ifp);
   9364 		wm_nq_transmit_locked(ifp, txq);
   9365 	} else {
    9366 		/* XXX needed for ALTQ or single-CPU systems */
   9367 		if (qid == 0)
   9368 			wm_start_locked(ifp);
   9369 		wm_transmit_locked(ifp, txq);
   9370 	}
   9371 }
   9372 
   9373 /* Interrupt */
   9374 
   9375 /*
   9376  * wm_txeof:
   9377  *
   9378  *	Helper; handle transmit interrupts.
   9379  */
   9380 static bool
   9381 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9382 {
   9383 	struct wm_softc *sc = txq->txq_sc;
   9384 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9385 	struct wm_txsoft *txs;
   9386 	int count = 0;
   9387 	int i;
   9388 	uint8_t status;
   9389 	bool more = false;
   9390 
   9391 	KASSERT(mutex_owned(txq->txq_lock));
   9392 
   9393 	if (txq->txq_stopping)
   9394 		return false;
   9395 
   9396 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9397 
   9398 	/*
   9399 	 * Go through the Tx list and free mbufs for those
   9400 	 * frames which have been transmitted.
   9401 	 */
   9402 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9403 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9404 		txs = &txq->txq_soft[i];
   9405 
   9406 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9407 			device_xname(sc->sc_dev), i));
   9408 
   9409 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9410 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9411 
   9412 		status =
   9413 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9414 		if ((status & WTX_ST_DD) == 0) {
   9415 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9416 			    BUS_DMASYNC_PREREAD);
   9417 			break;
   9418 		}
   9419 
   9420 		if (limit-- == 0) {
   9421 			more = true;
   9422 			DPRINTF(sc, WM_DEBUG_TX,
   9423 			    ("%s: TX: loop limited, job %d is not processed\n",
   9424 				device_xname(sc->sc_dev), i));
   9425 			break;
   9426 		}
   9427 
   9428 		count++;
   9429 		DPRINTF(sc, WM_DEBUG_TX,
   9430 		    ("%s: TX: job %d done: descs %d..%d\n",
   9431 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9432 		    txs->txs_lastdesc));
   9433 
   9434 		/*
   9435 		 * XXX We should probably be using the statistics
   9436 		 * XXX registers, but I don't know if they exist
   9437 		 * XXX on chips before the i82544.
   9438 		 */
   9439 
   9440 #ifdef WM_EVENT_COUNTERS
   9441 		if (status & WTX_ST_TU)
   9442 			WM_Q_EVCNT_INCR(txq, underrun);
   9443 #endif /* WM_EVENT_COUNTERS */
   9444 
   9445 		/*
    9446 		 * Documents for the 82574 and newer say that the status
    9447 		 * field has neither the EC (Excessive Collision) bit nor
    9448 		 * the LC (Late Collision) bit (both are reserved). Refer
    9449 		 * to the "PCIe GbE Controller Open Source Software
    9450 		 * Developer's Manual", the 82574 datasheet, and newer.
    9451 		 *
    9452 		 * XXX The LC bit has been observed set on I218 even on
    9453 		 * full-duplex media, so it may have another, undocumented meaning.
   9454 		 */
   9455 
   9456 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9457 		    && ((sc->sc_type < WM_T_82574)
   9458 			|| (sc->sc_type == WM_T_80003))) {
   9459 			if_statinc(ifp, if_oerrors);
   9460 			if (status & WTX_ST_LC)
   9461 				log(LOG_WARNING, "%s: late collision\n",
   9462 				    device_xname(sc->sc_dev));
   9463 			else if (status & WTX_ST_EC) {
   9464 				if_statadd(ifp, if_collisions,
   9465 				    TX_COLLISION_THRESHOLD + 1);
   9466 				log(LOG_WARNING, "%s: excessive collisions\n",
   9467 				    device_xname(sc->sc_dev));
   9468 			}
   9469 		} else
   9470 			if_statinc(ifp, if_opackets);
   9471 
   9472 		txq->txq_packets++;
   9473 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9474 
   9475 		txq->txq_free += txs->txs_ndesc;
   9476 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9477 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9478 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9479 		m_freem(txs->txs_mbuf);
   9480 		txs->txs_mbuf = NULL;
   9481 	}
   9482 
   9483 	/* Update the dirty transmit buffer pointer. */
   9484 	txq->txq_sdirty = i;
   9485 	DPRINTF(sc, WM_DEBUG_TX,
   9486 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9487 
   9488 	if (count != 0)
   9489 		rnd_add_uint32(&sc->rnd_source, count);
   9490 
   9491 	/*
   9492 	 * If there are no more pending transmissions, cancel the watchdog
   9493 	 * timer.
   9494 	 */
   9495 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9496 		txq->txq_sending = false;
   9497 
   9498 	return more;
   9499 }
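
/*
 * Illustrative sketch, not driver code: the completion-scan pattern
 * wm_txeof() implements above.  The hardware sets a Descriptor Done
 * (DD) bit in the last descriptor of each job; software walks its
 * soft-state ring from the oldest outstanding job and stops at the
 * first one whose DD bit is still clear.  All names are hypothetical.
 */
#if 0
#define SOFT_RING_SIZE	64
#define ST_DD		0x01	/* hypothetical Descriptor Done bit */

struct soft_job {
	unsigned int lastdesc;	/* ring index of the job's last descriptor */
};

static unsigned int
reap_completed(const volatile unsigned char *status, struct soft_job *jobs,
    unsigned int dirty, unsigned int pending)
{
	unsigned int reaped = 0;

	while (pending-- > 0) {
		/* Stop at the first job the hardware has not finished. */
		if ((status[jobs[dirty].lastdesc] & ST_DD) == 0)
			break;
		/* ...unload DMA map, free mbuf, return descriptors... */
		dirty = (dirty + 1) % SOFT_RING_SIZE;
		reaped++;
	}
	return reaped;
}
#endif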
   9500 
   9501 static inline uint32_t
   9502 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9503 {
   9504 	struct wm_softc *sc = rxq->rxq_sc;
   9505 
   9506 	if (sc->sc_type == WM_T_82574)
   9507 		return EXTRXC_STATUS(
   9508 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9509 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9510 		return NQRXC_STATUS(
   9511 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9512 	else
   9513 		return rxq->rxq_descs[idx].wrx_status;
   9514 }
   9515 
   9516 static inline uint32_t
   9517 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9518 {
   9519 	struct wm_softc *sc = rxq->rxq_sc;
   9520 
   9521 	if (sc->sc_type == WM_T_82574)
   9522 		return EXTRXC_ERROR(
   9523 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9524 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9525 		return NQRXC_ERROR(
   9526 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9527 	else
   9528 		return rxq->rxq_descs[idx].wrx_errors;
   9529 }
   9530 
   9531 static inline uint16_t
   9532 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9533 {
   9534 	struct wm_softc *sc = rxq->rxq_sc;
   9535 
   9536 	if (sc->sc_type == WM_T_82574)
   9537 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9538 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9539 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9540 	else
   9541 		return rxq->rxq_descs[idx].wrx_special;
   9542 }
   9543 
   9544 static inline int
   9545 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9546 {
   9547 	struct wm_softc *sc = rxq->rxq_sc;
   9548 
   9549 	if (sc->sc_type == WM_T_82574)
   9550 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9551 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9552 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9553 	else
   9554 		return rxq->rxq_descs[idx].wrx_len;
   9555 }
   9556 
   9557 #ifdef WM_DEBUG
   9558 static inline uint32_t
   9559 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9560 {
   9561 	struct wm_softc *sc = rxq->rxq_sc;
   9562 
   9563 	if (sc->sc_type == WM_T_82574)
   9564 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9565 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9566 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9567 	else
   9568 		return 0;
   9569 }
   9570 
   9571 static inline uint8_t
   9572 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9573 {
   9574 	struct wm_softc *sc = rxq->rxq_sc;
   9575 
   9576 	if (sc->sc_type == WM_T_82574)
   9577 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9578 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9579 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9580 	else
   9581 		return 0;
   9582 }
   9583 #endif /* WM_DEBUG */
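
/*
 * Illustrative sketch, not driver code: why each accessor above
 * dispatches on the chip type.  The same receive-ring memory is
 * interpreted in one of three layouts (legacy, 82574 extended, or
 * "new queue"), so conceptually the ring is a union and the right
 * view is selected at run time.  The fields below are a simplified,
 * hypothetical rendition, not the real register layouts.
 */
#if 0
#include <stdint.h>

union rxdesc_sketch {
	struct {			/* legacy layout */
		uint64_t addr;
		uint16_t len;
		uint16_t csum;
		uint8_t	 status;
		uint8_t	 errors;
		uint16_t special;	/* VLAN tag */
	} legacy;
	struct {			/* extended / "new queue" style */
		uint32_t mrq;		/* RSS type and queue */
		uint32_t rsshash;
		uint32_t err_stat;	/* errors and status packed */
		uint16_t pktlen;
		uint16_t vlan;
	} ext;
};
#endif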
   9584 
   9585 static inline bool
   9586 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9587     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9588 {
   9589 
   9590 	if (sc->sc_type == WM_T_82574)
   9591 		return (status & ext_bit) != 0;
   9592 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9593 		return (status & nq_bit) != 0;
   9594 	else
   9595 		return (status & legacy_bit) != 0;
   9596 }
   9597 
   9598 static inline bool
   9599 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9600     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9601 {
   9602 
   9603 	if (sc->sc_type == WM_T_82574)
   9604 		return (error & ext_bit) != 0;
   9605 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9606 		return (error & nq_bit) != 0;
   9607 	else
   9608 		return (error & legacy_bit) != 0;
   9609 }
   9610 
   9611 static inline bool
   9612 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9613 {
   9614 
   9615 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9616 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9617 		return true;
   9618 	else
   9619 		return false;
   9620 }
   9621 
   9622 static inline bool
   9623 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9624 {
   9625 	struct wm_softc *sc = rxq->rxq_sc;
   9626 
   9627 	/* XXX missing error bit for newqueue? */
   9628 	if (wm_rxdesc_is_set_error(sc, errors,
   9629 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9630 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9631 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9632 		NQRXC_ERROR_RXE)) {
   9633 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9634 		    EXTRXC_ERROR_SE, 0))
   9635 			log(LOG_WARNING, "%s: symbol error\n",
   9636 			    device_xname(sc->sc_dev));
   9637 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9638 		    EXTRXC_ERROR_SEQ, 0))
   9639 			log(LOG_WARNING, "%s: receive sequence error\n",
   9640 			    device_xname(sc->sc_dev));
   9641 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9642 		    EXTRXC_ERROR_CE, 0))
   9643 			log(LOG_WARNING, "%s: CRC error\n",
   9644 			    device_xname(sc->sc_dev));
   9645 		return true;
   9646 	}
   9647 
   9648 	return false;
   9649 }
   9650 
   9651 static inline bool
   9652 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9653 {
   9654 	struct wm_softc *sc = rxq->rxq_sc;
   9655 
   9656 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9657 		NQRXC_STATUS_DD)) {
   9658 		/* We have processed all of the receive descriptors. */
   9659 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9660 		return false;
   9661 	}
   9662 
   9663 	return true;
   9664 }
   9665 
   9666 static inline bool
   9667 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9668     uint16_t vlantag, struct mbuf *m)
   9669 {
   9670 
   9671 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9672 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9673 		vlan_set_tag(m, le16toh(vlantag));
   9674 	}
   9675 
   9676 	return true;
   9677 }
   9678 
   9679 static inline void
   9680 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9681     uint32_t errors, struct mbuf *m)
   9682 {
   9683 	struct wm_softc *sc = rxq->rxq_sc;
   9684 
   9685 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9686 		if (wm_rxdesc_is_set_status(sc, status,
   9687 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9688 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9689 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9690 			if (wm_rxdesc_is_set_error(sc, errors,
   9691 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9692 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9693 		}
   9694 		if (wm_rxdesc_is_set_status(sc, status,
   9695 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9696 			/*
   9697 			 * Note: we don't know if this was TCP or UDP,
   9698 			 * so we just set both bits, and expect the
   9699 			 * upper layers to deal.
   9700 			 */
   9701 			WM_Q_EVCNT_INCR(rxq, tusum);
   9702 			m->m_pkthdr.csum_flags |=
   9703 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9704 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9705 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9706 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9707 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9708 		}
   9709 	}
   9710 }
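
/*
 * Illustrative sketch, not driver code: how a hypothetical consumer
 * reads the csum_flags set above.  The "verified" bit and the "bad"
 * bit travel together, so the test is two-step: did the hardware
 * check the checksum at all and, if so, did the check fail.  The
 * M_CSUM_* flags are the real mbuf(9) ones; the function is made up.
 */
#if 0
#include <sys/mbuf.h>

static int
rx_ip_csum_failed(const struct mbuf *m)
{
	const int f = m->m_pkthdr.csum_flags;

	/*
	 * True only if the hardware verified the IPv4 header checksum
	 * and the verification failed.
	 */
	return (f & M_CSUM_IPv4) != 0 && (f & M_CSUM_IPv4_BAD) != 0;
}
#endif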
   9711 
   9712 /*
   9713  * wm_rxeof:
   9714  *
   9715  *	Helper; handle receive interrupts.
   9716  */
   9717 static bool
   9718 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9719 {
   9720 	struct wm_softc *sc = rxq->rxq_sc;
   9721 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9722 	struct wm_rxsoft *rxs;
   9723 	struct mbuf *m;
   9724 	int i, len;
   9725 	int count = 0;
   9726 	uint32_t status, errors;
   9727 	uint16_t vlantag;
   9728 	bool more = false;
   9729 
   9730 	KASSERT(mutex_owned(rxq->rxq_lock));
   9731 
   9732 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9733 		rxs = &rxq->rxq_soft[i];
   9734 
   9735 		DPRINTF(sc, WM_DEBUG_RX,
   9736 		    ("%s: RX: checking descriptor %d\n",
   9737 			device_xname(sc->sc_dev), i));
   9738 		wm_cdrxsync(rxq, i,
   9739 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9740 
   9741 		status = wm_rxdesc_get_status(rxq, i);
   9742 		errors = wm_rxdesc_get_errors(rxq, i);
   9743 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9744 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9745 #ifdef WM_DEBUG
   9746 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9747 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9748 #endif
   9749 
   9750 		if (!wm_rxdesc_dd(rxq, i, status))
   9751 			break;
   9752 
   9753 		if (limit-- == 0) {
   9754 			more = true;
   9755 			DPRINTF(sc, WM_DEBUG_RX,
   9756 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9757 				device_xname(sc->sc_dev), i));
   9758 			break;
   9759 		}
   9760 
   9761 		count++;
   9762 		if (__predict_false(rxq->rxq_discard)) {
   9763 			DPRINTF(sc, WM_DEBUG_RX,
   9764 			    ("%s: RX: discarding contents of descriptor %d\n",
   9765 				device_xname(sc->sc_dev), i));
   9766 			wm_init_rxdesc(rxq, i);
   9767 			if (wm_rxdesc_is_eop(rxq, status)) {
   9768 				/* Reset our state. */
   9769 				DPRINTF(sc, WM_DEBUG_RX,
   9770 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9771 					device_xname(sc->sc_dev)));
   9772 				rxq->rxq_discard = 0;
   9773 			}
   9774 			continue;
   9775 		}
   9776 
   9777 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9778 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9779 
   9780 		m = rxs->rxs_mbuf;
   9781 
   9782 		/*
   9783 		 * Add a new receive buffer to the ring, unless of
   9784 		 * course the length is zero. Treat the latter as a
   9785 		 * failed mapping.
   9786 		 */
   9787 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9788 			/*
   9789 			 * Failed, throw away what we've done so
   9790 			 * far, and discard the rest of the packet.
   9791 			 */
   9792 			if_statinc(ifp, if_ierrors);
   9793 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9794 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9795 			wm_init_rxdesc(rxq, i);
   9796 			if (!wm_rxdesc_is_eop(rxq, status))
   9797 				rxq->rxq_discard = 1;
   9798 			if (rxq->rxq_head != NULL)
   9799 				m_freem(rxq->rxq_head);
   9800 			WM_RXCHAIN_RESET(rxq);
   9801 			DPRINTF(sc, WM_DEBUG_RX,
   9802 			    ("%s: RX: Rx buffer allocation failed, "
   9803 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9804 				rxq->rxq_discard ? " (discard)" : ""));
   9805 			continue;
   9806 		}
   9807 
   9808 		m->m_len = len;
   9809 		rxq->rxq_len += len;
   9810 		DPRINTF(sc, WM_DEBUG_RX,
   9811 		    ("%s: RX: buffer at %p len %d\n",
   9812 			device_xname(sc->sc_dev), m->m_data, len));
   9813 
   9814 		/* If this is not the end of the packet, keep looking. */
   9815 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9816 			WM_RXCHAIN_LINK(rxq, m);
   9817 			DPRINTF(sc, WM_DEBUG_RX,
   9818 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9819 				device_xname(sc->sc_dev), rxq->rxq_len));
   9820 			continue;
   9821 		}
   9822 
   9823 		/*
    9824 		 * We have the entire packet now. The chip is configured
    9825 		 * to include the FCS (not all chips can be configured to
    9826 		 * strip it), so normally we must trim it. The exceptions
    9827 		 * are I35[04] and I21[01]: an erratum leaves their
    9828 		 * RCTL_SECRC bit always set, so the FCS is already
    9829 		 * stripped and we don't trim it. PCH2 and newer chips
    9830 		 * also exclude the FCS when jumbo frames are used, to
    9831 		 * work around an erratum. We may need to shorten the
    9832 		 * previous mbuf in the chain if the current mbuf is too short.
   9833 		 */
   9834 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9835 			if (m->m_len < ETHER_CRC_LEN) {
   9836 				rxq->rxq_tail->m_len
   9837 				    -= (ETHER_CRC_LEN - m->m_len);
   9838 				m->m_len = 0;
   9839 			} else
   9840 				m->m_len -= ETHER_CRC_LEN;
   9841 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9842 		} else
   9843 			len = rxq->rxq_len;
   9844 
   9845 		WM_RXCHAIN_LINK(rxq, m);
   9846 
   9847 		*rxq->rxq_tailp = NULL;
   9848 		m = rxq->rxq_head;
   9849 
   9850 		WM_RXCHAIN_RESET(rxq);
   9851 
   9852 		DPRINTF(sc, WM_DEBUG_RX,
   9853 		    ("%s: RX: have entire packet, len -> %d\n",
   9854 			device_xname(sc->sc_dev), len));
   9855 
   9856 		/* If an error occurred, update stats and drop the packet. */
   9857 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9858 			m_freem(m);
   9859 			continue;
   9860 		}
   9861 
   9862 		/* No errors.  Receive the packet. */
   9863 		m_set_rcvif(m, ifp);
   9864 		m->m_pkthdr.len = len;
   9865 		/*
   9866 		 * TODO
    9867 		 * we should save the rsshash and rsstype in this mbuf.
   9868 		 */
   9869 		DPRINTF(sc, WM_DEBUG_RX,
   9870 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9871 			device_xname(sc->sc_dev), rsstype, rsshash));
   9872 
   9873 		/*
   9874 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9875 		 * for us.  Associate the tag with the packet.
   9876 		 */
   9877 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9878 			continue;
   9879 
   9880 		/* Set up checksum info for this packet. */
   9881 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9882 
   9883 		rxq->rxq_packets++;
   9884 		rxq->rxq_bytes += len;
   9885 		/* Pass it on. */
   9886 		if_percpuq_enqueue(sc->sc_ipq, m);
   9887 
   9888 		if (rxq->rxq_stopping)
   9889 			break;
   9890 	}
   9891 	rxq->rxq_ptr = i;
   9892 
   9893 	if (count != 0)
   9894 		rnd_add_uint32(&sc->rnd_source, count);
   9895 
   9896 	DPRINTF(sc, WM_DEBUG_RX,
   9897 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9898 
   9899 	return more;
   9900 }
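
/*
 * Illustrative sketch, not driver code: the head/tail-pointer idiom
 * behind the WM_RXCHAIN_LINK()/WM_RXCHAIN_RESET() macros used above.
 * A pointer-to-pointer "tailp" always refers to the location the next
 * fragment should be stored through (the head pointer at first, then
 * the last mbuf's m_next), so appending is O(1) with no special case
 * for an empty chain.  The struct name is hypothetical.
 */
#if 0
#include <sys/mbuf.h>

struct rxchain {
	struct mbuf *head;
	struct mbuf **tailp;
};

static void
rxchain_reset(struct rxchain *c)
{

	c->head = NULL;
	c->tailp = &c->head;
}

static void
rxchain_link(struct rxchain *c, struct mbuf *m)
{

	*c->tailp = m;
	c->tailp = &m->m_next;
}
#endif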
   9901 
   9902 /*
   9903  * wm_linkintr_gmii:
   9904  *
   9905  *	Helper; handle link interrupts for GMII.
   9906  */
   9907 static void
   9908 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9909 {
   9910 	device_t dev = sc->sc_dev;
   9911 	uint32_t status, reg;
   9912 	bool link;
   9913 	int rv;
   9914 
   9915 	KASSERT(mutex_owned(sc->sc_core_lock));
   9916 
   9917 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9918 		__func__));
   9919 
   9920 	if ((icr & ICR_LSC) == 0) {
   9921 		if (icr & ICR_RXSEQ)
   9922 			DPRINTF(sc, WM_DEBUG_LINK,
   9923 			    ("%s: LINK Receive sequence error\n",
   9924 				device_xname(dev)));
   9925 		return;
   9926 	}
   9927 
   9928 	/* Link status changed */
   9929 	status = CSR_READ(sc, WMREG_STATUS);
   9930 	link = status & STATUS_LU;
   9931 	if (link) {
   9932 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9933 			device_xname(dev),
   9934 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9935 		if (wm_phy_need_linkdown_discard(sc)) {
   9936 			DPRINTF(sc, WM_DEBUG_LINK,
   9937 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9938 				device_xname(dev)));
   9939 			wm_clear_linkdown_discard(sc);
   9940 		}
   9941 	} else {
   9942 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9943 			device_xname(dev)));
   9944 		if (wm_phy_need_linkdown_discard(sc)) {
   9945 			DPRINTF(sc, WM_DEBUG_LINK,
   9946 			    ("%s: linkintr: Set linkdown discard flag\n",
   9947 				device_xname(dev)));
   9948 			wm_set_linkdown_discard(sc);
   9949 		}
   9950 	}
   9951 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9952 		wm_gig_downshift_workaround_ich8lan(sc);
   9953 
   9954 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9955 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9956 
   9957 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9958 		device_xname(dev)));
   9959 	mii_pollstat(&sc->sc_mii);
   9960 	if (sc->sc_type == WM_T_82543) {
   9961 		int miistatus, active;
   9962 
   9963 		/*
   9964 		 * With 82543, we need to force speed and
   9965 		 * duplex on the MAC equal to what the PHY
   9966 		 * speed and duplex configuration is.
   9967 		 */
   9968 		miistatus = sc->sc_mii.mii_media_status;
   9969 
   9970 		if (miistatus & IFM_ACTIVE) {
   9971 			active = sc->sc_mii.mii_media_active;
   9972 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9973 			switch (IFM_SUBTYPE(active)) {
   9974 			case IFM_10_T:
   9975 				sc->sc_ctrl |= CTRL_SPEED_10;
   9976 				break;
   9977 			case IFM_100_TX:
   9978 				sc->sc_ctrl |= CTRL_SPEED_100;
   9979 				break;
   9980 			case IFM_1000_T:
   9981 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9982 				break;
   9983 			default:
   9984 				/*
   9985 				 * Fiber?
    9986 				 * Should not enter here.
   9987 				 */
   9988 				device_printf(dev, "unknown media (%x)\n",
   9989 				    active);
   9990 				break;
   9991 			}
   9992 			if (active & IFM_FDX)
   9993 				sc->sc_ctrl |= CTRL_FD;
   9994 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9995 		}
   9996 	} else if (sc->sc_type == WM_T_PCH) {
   9997 		wm_k1_gig_workaround_hv(sc,
   9998 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9999 	}
   10000 
   10001 	/*
   10002 	 * When connected at 10Mbps half-duplex, some parts are excessively
    10003 	 * aggressive, resulting in many collisions. To avoid this, increase
   10004 	 * the IPG and reduce Rx latency in the PHY.
   10005 	 */
   10006 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10007 	    && link) {
   10008 		uint32_t tipg_reg;
   10009 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10010 		bool fdx;
   10011 		uint16_t emi_addr, emi_val;
   10012 
   10013 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10014 		tipg_reg &= ~TIPG_IPGT_MASK;
   10015 		fdx = status & STATUS_FD;
   10016 
   10017 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10018 			tipg_reg |= 0xff;
   10019 			/* Reduce Rx latency in analog PHY */
   10020 			emi_val = 0;
   10021 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10022 		    fdx && speed != STATUS_SPEED_1000) {
   10023 			tipg_reg |= 0xc;
   10024 			emi_val = 1;
   10025 		} else {
   10026 			/* Roll back the default values */
   10027 			tipg_reg |= 0x08;
   10028 			emi_val = 1;
   10029 		}
   10030 
   10031 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10032 
   10033 		rv = sc->phy.acquire(sc);
   10034 		if (rv)
   10035 			return;
   10036 
   10037 		if (sc->sc_type == WM_T_PCH2)
   10038 			emi_addr = I82579_RX_CONFIG;
   10039 		else
   10040 			emi_addr = I217_RX_CONFIG;
   10041 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10042 
   10043 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10044 			uint16_t phy_reg;
   10045 
   10046 			sc->phy.readreg_locked(dev, 2,
   10047 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10048 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10049 			if (speed == STATUS_SPEED_100
   10050 			    || speed == STATUS_SPEED_10)
   10051 				phy_reg |= 0x3e8;
   10052 			else
   10053 				phy_reg |= 0xfa;
   10054 			sc->phy.writereg_locked(dev, 2,
   10055 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10056 
   10057 			if (speed == STATUS_SPEED_1000) {
   10058 				sc->phy.readreg_locked(dev, 2,
   10059 				    HV_PM_CTRL, &phy_reg);
   10060 
   10061 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10062 
   10063 				sc->phy.writereg_locked(dev, 2,
   10064 				    HV_PM_CTRL, phy_reg);
   10065 			}
   10066 		}
   10067 		sc->phy.release(sc);
   10068 
   10069 		if (rv)
   10070 			return;
   10071 
   10072 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10073 			uint16_t data, ptr_gap;
   10074 
   10075 			if (speed == STATUS_SPEED_1000) {
   10076 				rv = sc->phy.acquire(sc);
   10077 				if (rv)
   10078 					return;
   10079 
   10080 				rv = sc->phy.readreg_locked(dev, 2,
   10081 				    I82579_UNKNOWN1, &data);
   10082 				if (rv) {
   10083 					sc->phy.release(sc);
   10084 					return;
   10085 				}
   10086 
   10087 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10088 				if (ptr_gap < 0x18) {
   10089 					data &= ~(0x3ff << 2);
   10090 					data |= (0x18 << 2);
   10091 					rv = sc->phy.writereg_locked(dev,
   10092 					    2, I82579_UNKNOWN1, data);
   10093 				}
   10094 				sc->phy.release(sc);
   10095 				if (rv)
   10096 					return;
   10097 			} else {
   10098 				rv = sc->phy.acquire(sc);
   10099 				if (rv)
   10100 					return;
   10101 
   10102 				rv = sc->phy.writereg_locked(dev, 2,
   10103 				    I82579_UNKNOWN1, 0xc023);
   10104 				sc->phy.release(sc);
   10105 				if (rv)
   10106 					return;
   10107 
   10108 			}
   10109 		}
   10110 	}
   10111 
   10112 	/*
    10113 	 * I217 packet loss issue: ensure that the FEXTNVM4 Beacon
    10114 	 * Duration is set correctly on power up.
    10115 	 *
    10116 	 * Set the Beacon Duration for I217 to 8 usec.
   10117 	 */
   10118 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10119 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10120 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10121 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10122 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10123 	}
   10124 
    10125 	/* Work around the I218 hang issue */
   10126 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10127 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10128 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10129 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10130 		wm_k1_workaround_lpt_lp(sc, link);
   10131 
   10132 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10133 		/*
   10134 		 * Set platform power management values for Latency
   10135 		 * Tolerance Reporting (LTR)
   10136 		 */
   10137 		wm_platform_pm_pch_lpt(sc,
   10138 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10139 	}
   10140 
   10141 	/* Clear link partner's EEE ability */
   10142 	sc->eee_lp_ability = 0;
   10143 
   10144 	/* FEXTNVM6 K1-off workaround */
   10145 	if (sc->sc_type == WM_T_PCH_SPT) {
   10146 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10147 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10148 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10149 		else
   10150 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10151 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10152 	}
   10153 
   10154 	if (!link)
   10155 		return;
   10156 
   10157 	switch (sc->sc_type) {
   10158 	case WM_T_PCH2:
   10159 		wm_k1_workaround_lv(sc);
   10160 		/* FALLTHROUGH */
   10161 	case WM_T_PCH:
   10162 		if (sc->sc_phytype == WMPHY_82578)
   10163 			wm_link_stall_workaround_hv(sc);
   10164 		break;
   10165 	default:
   10166 		break;
   10167 	}
   10168 
   10169 	/* Enable/Disable EEE after link up */
   10170 	if (sc->sc_phytype > WMPHY_82579)
   10171 		wm_set_eee_pchlan(sc);
   10172 }
   10173 
   10174 /*
   10175  * wm_linkintr_tbi:
   10176  *
   10177  *	Helper; handle link interrupts for TBI mode.
   10178  */
   10179 static void
   10180 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10181 {
   10182 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10183 	uint32_t status;
   10184 
   10185 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10186 		__func__));
   10187 
   10188 	status = CSR_READ(sc, WMREG_STATUS);
   10189 	if (icr & ICR_LSC) {
   10190 		wm_check_for_link(sc);
   10191 		if (status & STATUS_LU) {
   10192 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10193 				device_xname(sc->sc_dev),
   10194 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10195 			/*
   10196 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10197 			 * so we should update sc->sc_ctrl
   10198 			 */
   10199 
   10200 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10201 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10202 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10203 			if (status & STATUS_FD)
   10204 				sc->sc_tctl |=
   10205 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10206 			else
   10207 				sc->sc_tctl |=
   10208 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10209 			if (sc->sc_ctrl & CTRL_TFCE)
   10210 				sc->sc_fcrtl |= FCRTL_XONE;
   10211 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10212 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10213 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10214 			sc->sc_tbi_linkup = 1;
   10215 			if_link_state_change(ifp, LINK_STATE_UP);
   10216 		} else {
   10217 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10218 				device_xname(sc->sc_dev)));
   10219 			sc->sc_tbi_linkup = 0;
   10220 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10221 		}
   10222 		/* Update LED */
   10223 		wm_tbi_serdes_set_linkled(sc);
   10224 	} else if (icr & ICR_RXSEQ)
   10225 		DPRINTF(sc, WM_DEBUG_LINK,
   10226 		    ("%s: LINK: Receive sequence error\n",
   10227 			device_xname(sc->sc_dev)));
   10228 }
   10229 
   10230 /*
   10231  * wm_linkintr_serdes:
   10232  *
    10233  *	Helper; handle link interrupts for SERDES mode.
   10234  */
   10235 static void
   10236 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10237 {
   10238 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10239 	struct mii_data *mii = &sc->sc_mii;
   10240 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10241 	uint32_t pcs_adv, pcs_lpab, reg;
   10242 
   10243 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10244 		__func__));
   10245 
   10246 	if (icr & ICR_LSC) {
   10247 		/* Check PCS */
   10248 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10249 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10250 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10251 				device_xname(sc->sc_dev)));
   10252 			mii->mii_media_status |= IFM_ACTIVE;
   10253 			sc->sc_tbi_linkup = 1;
   10254 			if_link_state_change(ifp, LINK_STATE_UP);
   10255 		} else {
   10256 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10257 				device_xname(sc->sc_dev)));
   10258 			mii->mii_media_status |= IFM_NONE;
   10259 			sc->sc_tbi_linkup = 0;
   10260 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10261 			wm_tbi_serdes_set_linkled(sc);
   10262 			return;
   10263 		}
   10264 		mii->mii_media_active |= IFM_1000_SX;
   10265 		if ((reg & PCS_LSTS_FDX) != 0)
   10266 			mii->mii_media_active |= IFM_FDX;
   10267 		else
   10268 			mii->mii_media_active |= IFM_HDX;
   10269 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10270 			/* Check flow */
   10271 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10272 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10273 				DPRINTF(sc, WM_DEBUG_LINK,
   10274 				    ("XXX LINKOK but not ACOMP\n"));
   10275 				return;
   10276 			}
   10277 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10278 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10279 			DPRINTF(sc, WM_DEBUG_LINK,
   10280 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10281 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10282 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10283 				mii->mii_media_active |= IFM_FLOW
   10284 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10285 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10286 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10287 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10288 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10289 				mii->mii_media_active |= IFM_FLOW
   10290 				    | IFM_ETH_TXPAUSE;
   10291 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10292 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10293 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10294 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10295 				mii->mii_media_active |= IFM_FLOW
   10296 				    | IFM_ETH_RXPAUSE;
   10297 		}
   10298 		/* Update LED */
   10299 		wm_tbi_serdes_set_linkled(sc);
   10300 	} else
   10301 		DPRINTF(sc, WM_DEBUG_LINK,
   10302 		    ("%s: LINK: Receive sequence error\n",
   10303 		    device_xname(sc->sc_dev)));
   10304 }
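
/*
 * Illustrative sketch, not driver code: the symmetric/asymmetric
 * PAUSE resolution implemented above, restated as a single function
 * in the spirit of IEEE 802.3 Annex 28B.  "l_*" is what we advertise,
 * "p_*" is what the link partner advertises.  Names are hypothetical.
 */
#if 0
enum pause_mode { PAUSE_NONE, PAUSE_TXRX, PAUSE_TX, PAUSE_RX };

static enum pause_mode
resolve_pause(int l_sym, int l_asym, int p_sym, int p_asym)
{

	if (l_sym && p_sym)
		return PAUSE_TXRX;	/* pause in both directions */
	if (!l_sym && l_asym && p_sym && p_asym)
		return PAUSE_TX;	/* we send PAUSE frames only */
	if (l_sym && l_asym && !p_sym && p_asym)
		return PAUSE_RX;	/* we honor PAUSE frames only */
	return PAUSE_NONE;
}
#endif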
   10305 
   10306 /*
   10307  * wm_linkintr:
   10308  *
   10309  *	Helper; handle link interrupts.
   10310  */
   10311 static void
   10312 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10313 {
   10314 
   10315 	KASSERT(mutex_owned(sc->sc_core_lock));
   10316 
   10317 	if (sc->sc_flags & WM_F_HAS_MII)
   10318 		wm_linkintr_gmii(sc, icr);
   10319 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10320 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10321 		wm_linkintr_serdes(sc, icr);
   10322 	else
   10323 		wm_linkintr_tbi(sc, icr);
   10324 }
   10325 
   10326 
   10327 static inline void
   10328 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10329 {
   10330 
   10331 	if (wmq->wmq_txrx_use_workqueue) {
   10332 		if (!wmq->wmq_wq_enqueued) {
   10333 			wmq->wmq_wq_enqueued = true;
   10334 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10335 			    curcpu());
   10336 		}
   10337 	} else
   10338 		softint_schedule(wmq->wmq_si);
   10339 }
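
/*
 * Illustrative sketch, not driver code: how the two dispatch paths
 * chosen above could be set up at attach time with the standard
 * workqueue(9) and softint(9) interfaces.  The real driver's field
 * and handler names are reused, but the exact arguments here are an
 * assumption; error handling and teardown are omitted.  WQ_PERCPU
 * matches the workqueue_enqueue(..., curcpu()) call above.
 */
#if 0
#include <sys/param.h>
#include <sys/workqueue.h>
#include <sys/intr.h>

static int
queue_dispatch_setup(struct wm_softc *sc, struct wm_queue *wmq)
{
	int error;

	/* Workqueue path: the callback runs in a kernel thread. */
	error = workqueue_create(&sc->sc_queue_wq, "wmtxrx",
	    wm_handle_queue_work, NULL, PRI_SOFTNET, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (error != 0)
		return error;

	/* Softint path: the callback runs in soft interrupt context. */
	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    wm_handle_queue, wmq);
	return (wmq->wmq_si != NULL) ? 0 : ENOMEM;
}
#endif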
   10340 
   10341 static inline void
   10342 wm_legacy_intr_disable(struct wm_softc *sc)
   10343 {
   10344 
   10345 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10346 }
   10347 
   10348 static inline void
   10349 wm_legacy_intr_enable(struct wm_softc *sc)
   10350 {
   10351 
   10352 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10353 }
   10354 
   10355 /*
   10356  * wm_intr_legacy:
   10357  *
   10358  *	Interrupt service routine for INTx and MSI.
   10359  */
   10360 static int
   10361 wm_intr_legacy(void *arg)
   10362 {
   10363 	struct wm_softc *sc = arg;
   10364 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10365 	struct wm_queue *wmq = &sc->sc_queue[0];
   10366 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10367 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10368 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10369 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10370 	uint32_t icr, rndval = 0;
   10371 	bool more = false;
   10372 
   10373 	icr = CSR_READ(sc, WMREG_ICR);
   10374 	if ((icr & sc->sc_icr) == 0)
   10375 		return 0;
   10376 
   10377 	DPRINTF(sc, WM_DEBUG_TX,
    10378 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10379 	if (rndval == 0)
   10380 		rndval = icr;
   10381 
   10382 	mutex_enter(txq->txq_lock);
   10383 
   10384 	if (txq->txq_stopping) {
   10385 		mutex_exit(txq->txq_lock);
   10386 		return 1;
   10387 	}
   10388 
   10389 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10390 	if (icr & ICR_TXDW) {
   10391 		DPRINTF(sc, WM_DEBUG_TX,
   10392 		    ("%s: TX: got TXDW interrupt\n",
   10393 			device_xname(sc->sc_dev)));
   10394 		WM_Q_EVCNT_INCR(txq, txdw);
   10395 	}
   10396 #endif
   10397 	if (txlimit > 0) {
   10398 		more |= wm_txeof(txq, txlimit);
   10399 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10400 			more = true;
   10401 	} else
   10402 		more = true;
   10403 	mutex_exit(txq->txq_lock);
   10404 
   10405 	mutex_enter(rxq->rxq_lock);
   10406 
   10407 	if (rxq->rxq_stopping) {
   10408 		mutex_exit(rxq->rxq_lock);
   10409 		return 1;
   10410 	}
   10411 
   10412 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10413 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10414 		DPRINTF(sc, WM_DEBUG_RX,
   10415 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10416 			device_xname(sc->sc_dev),
   10417 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10418 		WM_Q_EVCNT_INCR(rxq, intr);
   10419 	}
   10420 #endif
   10421 	if (rxlimit > 0) {
   10422 		/*
   10423 		 * wm_rxeof() does *not* call upper layer functions directly,
    10424 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10425 		 * So, we can call wm_rxeof() in interrupt context.
   10426 		 */
   10427 		more = wm_rxeof(rxq, rxlimit);
   10428 	} else
   10429 		more = true;
   10430 
   10431 	mutex_exit(rxq->rxq_lock);
   10432 
   10433 	mutex_enter(sc->sc_core_lock);
   10434 
   10435 	if (sc->sc_core_stopping) {
   10436 		mutex_exit(sc->sc_core_lock);
   10437 		return 1;
   10438 	}
   10439 
   10440 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10441 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10442 		wm_linkintr(sc, icr);
   10443 	}
   10444 	if ((icr & ICR_GPI(0)) != 0)
   10445 		device_printf(sc->sc_dev, "got module interrupt\n");
   10446 
   10447 	mutex_exit(sc->sc_core_lock);
   10448 
   10449 	if (icr & ICR_RXO) {
   10450 #if defined(WM_DEBUG)
   10451 		log(LOG_WARNING, "%s: Receive overrun\n",
   10452 		    device_xname(sc->sc_dev));
   10453 #endif /* defined(WM_DEBUG) */
   10454 	}
   10455 
   10456 	rnd_add_uint32(&sc->rnd_source, rndval);
   10457 
   10458 	if (more) {
   10459 		/* Try to get more packets going. */
   10460 		wm_legacy_intr_disable(sc);
   10461 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10462 		wm_sched_handle_queue(sc, wmq);
   10463 	}
   10464 
   10465 	return 1;
   10466 }
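
/*
 * Illustrative sketch, not driver code: the claim test at the top of
 * wm_intr_legacy() above.  On these controllers a read of ICR returns
 * and clears the pending interrupt causes, so the handler reads once,
 * masks with the causes it enabled (sc_icr), and reports "not ours"
 * when nothing relevant is set, since an INTx line may be shared.
 */
#if 0
#include <stdint.h>

static int
intr_claim(uint32_t icr, uint32_t enabled)
{

	if ((icr & enabled) == 0)
		return 0;	/* not ours; let other handlers run */
	/* ...service the Tx, Rx and link causes encoded in icr... */
	return 1;		/* claimed */
}
#endif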
   10467 
   10468 static inline void
   10469 wm_txrxintr_disable(struct wm_queue *wmq)
   10470 {
   10471 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10472 
   10473 	if (__predict_false(!wm_is_using_msix(sc))) {
   10474 		wm_legacy_intr_disable(sc);
   10475 		return;
   10476 	}
   10477 
   10478 	if (sc->sc_type == WM_T_82574)
   10479 		CSR_WRITE(sc, WMREG_IMC,
   10480 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10481 	else if (sc->sc_type == WM_T_82575)
   10482 		CSR_WRITE(sc, WMREG_EIMC,
   10483 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10484 	else
   10485 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10486 }
   10487 
   10488 static inline void
   10489 wm_txrxintr_enable(struct wm_queue *wmq)
   10490 {
   10491 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10492 
   10493 	wm_itrs_calculate(sc, wmq);
   10494 
   10495 	if (__predict_false(!wm_is_using_msix(sc))) {
   10496 		wm_legacy_intr_enable(sc);
   10497 		return;
   10498 	}
   10499 
   10500 	/*
    10501 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10502 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
    10503 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    10504 	 * while its wm_handle_queue(wmq) is running.
   10505 	 */
   10506 	if (sc->sc_type == WM_T_82574)
   10507 		CSR_WRITE(sc, WMREG_IMS,
   10508 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10509 	else if (sc->sc_type == WM_T_82575)
   10510 		CSR_WRITE(sc, WMREG_EIMS,
   10511 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10512 	else
   10513 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10514 }
   10515 
   10516 static int
   10517 wm_txrxintr_msix(void *arg)
   10518 {
   10519 	struct wm_queue *wmq = arg;
   10520 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10521 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10522 	struct wm_softc *sc = txq->txq_sc;
   10523 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10524 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10525 	bool txmore;
   10526 	bool rxmore;
   10527 
   10528 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10529 
   10530 	DPRINTF(sc, WM_DEBUG_TX,
   10531 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10532 
   10533 	wm_txrxintr_disable(wmq);
   10534 
   10535 	mutex_enter(txq->txq_lock);
   10536 
   10537 	if (txq->txq_stopping) {
   10538 		mutex_exit(txq->txq_lock);
   10539 		return 1;
   10540 	}
   10541 
   10542 	WM_Q_EVCNT_INCR(txq, txdw);
   10543 	if (txlimit > 0) {
   10544 		txmore = wm_txeof(txq, txlimit);
    10545 		/* wm_deferred_start() is done in wm_handle_queue(). */
   10546 	} else
   10547 		txmore = true;
   10548 	mutex_exit(txq->txq_lock);
   10549 
   10550 	DPRINTF(sc, WM_DEBUG_RX,
   10551 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10552 	mutex_enter(rxq->rxq_lock);
   10553 
   10554 	if (rxq->rxq_stopping) {
   10555 		mutex_exit(rxq->rxq_lock);
   10556 		return 1;
   10557 	}
   10558 
   10559 	WM_Q_EVCNT_INCR(rxq, intr);
   10560 	if (rxlimit > 0) {
   10561 		rxmore = wm_rxeof(rxq, rxlimit);
   10562 	} else
   10563 		rxmore = true;
   10564 	mutex_exit(rxq->rxq_lock);
   10565 
   10566 	wm_itrs_writereg(sc, wmq);
   10567 
   10568 	if (txmore || rxmore) {
   10569 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10570 		wm_sched_handle_queue(sc, wmq);
   10571 	} else
   10572 		wm_txrxintr_enable(wmq);
   10573 
   10574 	return 1;
   10575 }
   10576 
   10577 static void
   10578 wm_handle_queue(void *arg)
   10579 {
   10580 	struct wm_queue *wmq = arg;
   10581 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10582 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10583 	struct wm_softc *sc = txq->txq_sc;
   10584 	u_int txlimit = sc->sc_tx_process_limit;
   10585 	u_int rxlimit = sc->sc_rx_process_limit;
   10586 	bool txmore;
   10587 	bool rxmore;
   10588 
   10589 	mutex_enter(txq->txq_lock);
   10590 	if (txq->txq_stopping) {
   10591 		mutex_exit(txq->txq_lock);
   10592 		return;
   10593 	}
   10594 	txmore = wm_txeof(txq, txlimit);
   10595 	wm_deferred_start_locked(txq);
   10596 	mutex_exit(txq->txq_lock);
   10597 
   10598 	mutex_enter(rxq->rxq_lock);
   10599 	if (rxq->rxq_stopping) {
   10600 		mutex_exit(rxq->rxq_lock);
   10601 		return;
   10602 	}
   10603 	WM_Q_EVCNT_INCR(rxq, defer);
   10604 	rxmore = wm_rxeof(rxq, rxlimit);
   10605 	mutex_exit(rxq->rxq_lock);
   10606 
   10607 	if (txmore || rxmore) {
   10608 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10609 		wm_sched_handle_queue(sc, wmq);
   10610 	} else
   10611 		wm_txrxintr_enable(wmq);
   10612 }
   10613 
   10614 static void
   10615 wm_handle_queue_work(struct work *wk, void *context)
   10616 {
   10617 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10618 
   10619 	/*
    10620 	 * Workaround for some qemu environments, which do not stop the
    10621 	 * interrupt immediately.
   10622 	 */
   10623 	wmq->wmq_wq_enqueued = false;
   10624 	wm_handle_queue(wmq);
   10625 }
   10626 
   10627 /*
   10628  * wm_linkintr_msix:
   10629  *
   10630  *	Interrupt service routine for link status change for MSI-X.
   10631  */
   10632 static int
   10633 wm_linkintr_msix(void *arg)
   10634 {
   10635 	struct wm_softc *sc = arg;
   10636 	uint32_t reg;
   10637 	bool has_rxo;
   10638 
   10639 	reg = CSR_READ(sc, WMREG_ICR);
   10640 	mutex_enter(sc->sc_core_lock);
   10641 	DPRINTF(sc, WM_DEBUG_LINK,
   10642 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10643 		device_xname(sc->sc_dev), reg));
   10644 
   10645 	if (sc->sc_core_stopping)
   10646 		goto out;
   10647 
   10648 	if ((reg & ICR_LSC) != 0) {
   10649 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10650 		wm_linkintr(sc, ICR_LSC);
   10651 	}
   10652 	if ((reg & ICR_GPI(0)) != 0)
   10653 		device_printf(sc->sc_dev, "got module interrupt\n");
   10654 
   10655 	/*
   10656 	 * XXX 82574 MSI-X mode workaround
   10657 	 *
   10658 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
    10659 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises only the
    10660 	 * ICR_OTHER MSI-X vector, not the ICR_RXQ(0) or ICR_RXQ(1)
    10661 	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts
    10662 	 * by writing WMREG_ICS so that received packets get processed.
   10663 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10664 #if defined(WM_DEBUG)
   10665 		log(LOG_WARNING, "%s: Receive overrun\n",
   10666 		    device_xname(sc->sc_dev));
   10667 #endif /* defined(WM_DEBUG) */
   10668 
   10669 		has_rxo = true;
   10670 		/*
    10671 		 * The RXO interrupt rate is very high when receive traffic
    10672 		 * is heavy. We use polling mode for ICR_OTHER, as for the
    10673 		 * Tx/Rx interrupts. ICR_OTHER is re-enabled at the end of
    10674 		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
    10675 		 * and ICR_RXQ(1) interrupts.
   10676 		 */
   10677 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10678 
   10679 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10680 	}
   10681 
   10682 
   10683 
   10684 out:
   10685 	mutex_exit(sc->sc_core_lock);
   10686 
   10687 	if (sc->sc_type == WM_T_82574) {
   10688 		if (!has_rxo)
   10689 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10690 		else
   10691 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10692 	} else if (sc->sc_type == WM_T_82575)
   10693 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10694 	else
   10695 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10696 
   10697 	return 1;
   10698 }
   10699 
   10700 /*
   10701  * Media related.
   10702  * GMII, SGMII, TBI (and SERDES)
   10703  */
   10704 
   10705 /* Common */
   10706 
   10707 /*
   10708  * wm_tbi_serdes_set_linkled:
   10709  *
   10710  *	Update the link LED on TBI and SERDES devices.
   10711  */
   10712 static void
   10713 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10714 {
   10715 
   10716 	if (sc->sc_tbi_linkup)
   10717 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10718 	else
   10719 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10720 
   10721 	/* 82540 or newer devices are active low */
   10722 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10723 
   10724 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10725 }
   10726 
   10727 /* GMII related */
   10728 
   10729 /*
   10730  * wm_gmii_reset:
   10731  *
   10732  *	Reset the PHY.
   10733  */
   10734 static void
   10735 wm_gmii_reset(struct wm_softc *sc)
   10736 {
   10737 	uint32_t reg;
   10738 	int rv;
   10739 
   10740 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10741 		device_xname(sc->sc_dev), __func__));
   10742 
   10743 	rv = sc->phy.acquire(sc);
   10744 	if (rv != 0) {
   10745 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10746 		    __func__);
   10747 		return;
   10748 	}
   10749 
   10750 	switch (sc->sc_type) {
   10751 	case WM_T_82542_2_0:
   10752 	case WM_T_82542_2_1:
   10753 		/* null */
   10754 		break;
   10755 	case WM_T_82543:
   10756 		/*
   10757 		 * With 82543, we need to force speed and duplex on the MAC
   10758 		 * equal to what the PHY speed and duplex configuration is.
   10759 		 * In addition, we need to perform a hardware reset on the PHY
   10760 		 * to take it out of reset.
   10761 		 */
   10762 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10763 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10764 
   10765 		/* The PHY reset pin is active-low. */
   10766 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10767 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10768 		    CTRL_EXT_SWDPIN(4));
   10769 		reg |= CTRL_EXT_SWDPIO(4);
   10770 
   10771 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10772 		CSR_WRITE_FLUSH(sc);
   10773 		delay(10*1000);
   10774 
   10775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10776 		CSR_WRITE_FLUSH(sc);
   10777 		delay(150);
   10778 #if 0
   10779 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10780 #endif
   10781 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10782 		break;
   10783 	case WM_T_82544:	/* Reset 10000us */
   10784 	case WM_T_82540:
   10785 	case WM_T_82545:
   10786 	case WM_T_82545_3:
   10787 	case WM_T_82546:
   10788 	case WM_T_82546_3:
   10789 	case WM_T_82541:
   10790 	case WM_T_82541_2:
   10791 	case WM_T_82547:
   10792 	case WM_T_82547_2:
   10793 	case WM_T_82571:	/* Reset 100us */
   10794 	case WM_T_82572:
   10795 	case WM_T_82573:
   10796 	case WM_T_82574:
   10797 	case WM_T_82575:
   10798 	case WM_T_82576:
   10799 	case WM_T_82580:
   10800 	case WM_T_I350:
   10801 	case WM_T_I354:
   10802 	case WM_T_I210:
   10803 	case WM_T_I211:
   10804 	case WM_T_82583:
   10805 	case WM_T_80003:
   10806 		/* Generic reset */
   10807 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10808 		CSR_WRITE_FLUSH(sc);
   10809 		delay(20000);
   10810 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10811 		CSR_WRITE_FLUSH(sc);
   10812 		delay(20000);
   10813 
   10814 		if ((sc->sc_type == WM_T_82541)
   10815 		    || (sc->sc_type == WM_T_82541_2)
   10816 		    || (sc->sc_type == WM_T_82547)
   10817 		    || (sc->sc_type == WM_T_82547_2)) {
    10818 			/* Workarounds for igp are done in igp_reset() */
   10819 			/* XXX add code to set LED after phy reset */
   10820 		}
   10821 		break;
   10822 	case WM_T_ICH8:
   10823 	case WM_T_ICH9:
   10824 	case WM_T_ICH10:
   10825 	case WM_T_PCH:
   10826 	case WM_T_PCH2:
   10827 	case WM_T_PCH_LPT:
   10828 	case WM_T_PCH_SPT:
   10829 	case WM_T_PCH_CNP:
   10830 		/* Generic reset */
   10831 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10832 		CSR_WRITE_FLUSH(sc);
   10833 		delay(100);
   10834 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10835 		CSR_WRITE_FLUSH(sc);
   10836 		delay(150);
   10837 		break;
   10838 	default:
   10839 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10840 		    __func__);
   10841 		break;
   10842 	}
   10843 
   10844 	sc->phy.release(sc);
   10845 
   10846 	/* get_cfg_done */
   10847 	wm_get_cfg_done(sc);
   10848 
   10849 	/* Extra setup */
   10850 	switch (sc->sc_type) {
   10851 	case WM_T_82542_2_0:
   10852 	case WM_T_82542_2_1:
   10853 	case WM_T_82543:
   10854 	case WM_T_82544:
   10855 	case WM_T_82540:
   10856 	case WM_T_82545:
   10857 	case WM_T_82545_3:
   10858 	case WM_T_82546:
   10859 	case WM_T_82546_3:
   10860 	case WM_T_82541_2:
   10861 	case WM_T_82547_2:
   10862 	case WM_T_82571:
   10863 	case WM_T_82572:
   10864 	case WM_T_82573:
   10865 	case WM_T_82574:
   10866 	case WM_T_82583:
   10867 	case WM_T_82575:
   10868 	case WM_T_82576:
   10869 	case WM_T_82580:
   10870 	case WM_T_I350:
   10871 	case WM_T_I354:
   10872 	case WM_T_I210:
   10873 	case WM_T_I211:
   10874 	case WM_T_80003:
   10875 		/* Null */
   10876 		break;
   10877 	case WM_T_82541:
   10878 	case WM_T_82547:
    10879 		/* XXX Configure the activity LED after PHY reset */
   10880 		break;
   10881 	case WM_T_ICH8:
   10882 	case WM_T_ICH9:
   10883 	case WM_T_ICH10:
   10884 	case WM_T_PCH:
   10885 	case WM_T_PCH2:
   10886 	case WM_T_PCH_LPT:
   10887 	case WM_T_PCH_SPT:
   10888 	case WM_T_PCH_CNP:
   10889 		wm_phy_post_reset(sc);
   10890 		break;
   10891 	default:
   10892 		panic("%s: unknown type\n", __func__);
   10893 		break;
   10894 	}
   10895 }
   10896 
   10897 /*
   10898  * Set up sc_phytype and mii_{read|write}reg.
   10899  *
   10900  *  To identify the PHY type, the correct read/write function must be
   10901  * selected, and to select the correct function, the PCI ID or MAC type
   10902  * must be used without accessing any PHY registers.
   10903  *
   10904  *  On the first call of this function, the PHY ID is not known yet, so
   10905  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
   10906  * so the result might be incorrect.
   10907  *
   10908  *  On the second call, the PHY OUI and model are used to identify the
   10909  * PHY type. This might still not be perfect because of missing table
   10910  * entries, but it is more reliable than the first call.
   10911  *
   10912  *  If the newly detected result differs from the previous assumption,
   10913  * a diagnostic message is printed.
   10914  */
   10915 static void
   10916 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10917     uint16_t phy_model)
   10918 {
   10919 	device_t dev = sc->sc_dev;
   10920 	struct mii_data *mii = &sc->sc_mii;
   10921 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10922 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10923 	mii_readreg_t new_readreg;
   10924 	mii_writereg_t new_writereg;
   10925 	bool dodiag = true;
   10926 
   10927 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10928 		device_xname(sc->sc_dev), __func__));
   10929 
   10930 	/*
   10931 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
   10932 	 * incorrect, so don't print diag output when it's the 2nd call.
   10933 	 */
   10934 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10935 		dodiag = false;
   10936 
   10937 	if (mii->mii_readreg == NULL) {
   10938 		/*
   10939 		 *  This is the first call of this function. For ICH and PCH
   10940 		 * variants, it's difficult to determine the PHY access method
   10941 		 * by sc_type, so use the PCI product ID for some devices.
   10942 		 */
   10943 
   10944 		switch (sc->sc_pcidevid) {
   10945 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10946 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10947 			/* 82577 */
   10948 			new_phytype = WMPHY_82577;
   10949 			break;
   10950 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10951 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10952 			/* 82578 */
   10953 			new_phytype = WMPHY_82578;
   10954 			break;
   10955 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10956 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10957 			/* 82579 */
   10958 			new_phytype = WMPHY_82579;
   10959 			break;
   10960 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10961 		case PCI_PRODUCT_INTEL_82801I_BM:
   10962 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10963 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10964 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10965 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10966 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10967 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10968 			/* ICH8, 9, 10 with 82567 */
   10969 			new_phytype = WMPHY_BM;
   10970 			break;
   10971 		default:
   10972 			break;
   10973 		}
   10974 	} else {
   10975 		/* It's not the first call. Use PHY OUI and model */
   10976 		switch (phy_oui) {
   10977 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10978 			switch (phy_model) {
   10979 			case MII_MODEL_ATTANSIC_AR8021:
   10980 				new_phytype = WMPHY_82578;
   10981 				break;
   10982 			default:
   10983 				break;
   10984 			}
   10985 			break;
   10986 		case MII_OUI_xxMARVELL:
   10987 			switch (phy_model) {
   10988 			case MII_MODEL_xxMARVELL_I210:
   10989 				new_phytype = WMPHY_I210;
   10990 				break;
   10991 			case MII_MODEL_xxMARVELL_E1011:
   10992 			case MII_MODEL_xxMARVELL_E1000_3:
   10993 			case MII_MODEL_xxMARVELL_E1000_5:
   10994 			case MII_MODEL_xxMARVELL_E1112:
   10995 				new_phytype = WMPHY_M88;
   10996 				break;
   10997 			case MII_MODEL_xxMARVELL_E1149:
   10998 				new_phytype = WMPHY_BM;
   10999 				break;
   11000 			case MII_MODEL_xxMARVELL_E1111:
   11001 			case MII_MODEL_xxMARVELL_I347:
   11002 			case MII_MODEL_xxMARVELL_E1512:
   11003 			case MII_MODEL_xxMARVELL_E1340M:
   11004 			case MII_MODEL_xxMARVELL_E1543:
   11005 				new_phytype = WMPHY_M88;
   11006 				break;
   11007 			case MII_MODEL_xxMARVELL_I82563:
   11008 				new_phytype = WMPHY_GG82563;
   11009 				break;
   11010 			default:
   11011 				break;
   11012 			}
   11013 			break;
   11014 		case MII_OUI_INTEL:
   11015 			switch (phy_model) {
   11016 			case MII_MODEL_INTEL_I82577:
   11017 				new_phytype = WMPHY_82577;
   11018 				break;
   11019 			case MII_MODEL_INTEL_I82579:
   11020 				new_phytype = WMPHY_82579;
   11021 				break;
   11022 			case MII_MODEL_INTEL_I217:
   11023 				new_phytype = WMPHY_I217;
   11024 				break;
   11025 			case MII_MODEL_INTEL_I82580:
   11026 				new_phytype = WMPHY_82580;
   11027 				break;
   11028 			case MII_MODEL_INTEL_I350:
   11029 				new_phytype = WMPHY_I350;
   11030 				break;
   11031 			default:
   11032 				break;
   11033 			}
   11034 			break;
   11035 		case MII_OUI_yyINTEL:
   11036 			switch (phy_model) {
   11037 			case MII_MODEL_yyINTEL_I82562G:
   11038 			case MII_MODEL_yyINTEL_I82562EM:
   11039 			case MII_MODEL_yyINTEL_I82562ET:
   11040 				new_phytype = WMPHY_IFE;
   11041 				break;
   11042 			case MII_MODEL_yyINTEL_IGP01E1000:
   11043 				new_phytype = WMPHY_IGP;
   11044 				break;
   11045 			case MII_MODEL_yyINTEL_I82566:
   11046 				new_phytype = WMPHY_IGP_3;
   11047 				break;
   11048 			default:
   11049 				break;
   11050 			}
   11051 			break;
   11052 		default:
   11053 			break;
   11054 		}
   11055 
   11056 		if (dodiag) {
   11057 			if (new_phytype == WMPHY_UNKNOWN)
   11058 				aprint_verbose_dev(dev,
   11059 				    "%s: Unknown PHY model. OUI=%06x, "
   11060 				    "model=%04x\n", __func__, phy_oui,
   11061 				    phy_model);
   11062 
   11063 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11064 			    && (sc->sc_phytype != new_phytype)) {
   11065 				aprint_error_dev(dev, "Previously assumed PHY "
   11066 				    "type (%u) was incorrect. PHY type from "
   11067 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   11068 			}
   11069 		}
   11070 	}
   11071 
   11072 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11073 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11074 		/* SGMII */
   11075 		new_readreg = wm_sgmii_readreg;
   11076 		new_writereg = wm_sgmii_writereg;
   11077 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11078 		/* BM2 (phyaddr == 1) */
   11079 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11080 		    && (new_phytype != WMPHY_BM)
   11081 		    && (new_phytype != WMPHY_UNKNOWN))
   11082 			doubt_phytype = new_phytype;
   11083 		new_phytype = WMPHY_BM;
   11084 		new_readreg = wm_gmii_bm_readreg;
   11085 		new_writereg = wm_gmii_bm_writereg;
   11086 	} else if (sc->sc_type >= WM_T_PCH) {
   11087 		/* All PCH* use _hv_ */
   11088 		new_readreg = wm_gmii_hv_readreg;
   11089 		new_writereg = wm_gmii_hv_writereg;
   11090 	} else if (sc->sc_type >= WM_T_ICH8) {
   11091 		/* non-82567 ICH8, 9 and 10 */
   11092 		new_readreg = wm_gmii_i82544_readreg;
   11093 		new_writereg = wm_gmii_i82544_writereg;
   11094 	} else if (sc->sc_type >= WM_T_80003) {
   11095 		/* 80003 */
   11096 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11097 		    && (new_phytype != WMPHY_GG82563)
   11098 		    && (new_phytype != WMPHY_UNKNOWN))
   11099 			doubt_phytype = new_phytype;
   11100 		new_phytype = WMPHY_GG82563;
   11101 		new_readreg = wm_gmii_i80003_readreg;
   11102 		new_writereg = wm_gmii_i80003_writereg;
   11103 	} else if (sc->sc_type >= WM_T_I210) {
   11104 		/* I210 and I211 */
   11105 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11106 		    && (new_phytype != WMPHY_I210)
   11107 		    && (new_phytype != WMPHY_UNKNOWN))
   11108 			doubt_phytype = new_phytype;
   11109 		new_phytype = WMPHY_I210;
   11110 		new_readreg = wm_gmii_gs40g_readreg;
   11111 		new_writereg = wm_gmii_gs40g_writereg;
   11112 	} else if (sc->sc_type >= WM_T_82580) {
   11113 		/* 82580, I350 and I354 */
   11114 		new_readreg = wm_gmii_82580_readreg;
   11115 		new_writereg = wm_gmii_82580_writereg;
   11116 	} else if (sc->sc_type >= WM_T_82544) {
   11117 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11118 		new_readreg = wm_gmii_i82544_readreg;
   11119 		new_writereg = wm_gmii_i82544_writereg;
   11120 	} else {
   11121 		new_readreg = wm_gmii_i82543_readreg;
   11122 		new_writereg = wm_gmii_i82543_writereg;
   11123 	}
   11124 
   11125 	if (new_phytype == WMPHY_BM) {
   11126 		/* All BM use _bm_ */
   11127 		new_readreg = wm_gmii_bm_readreg;
   11128 		new_writereg = wm_gmii_bm_writereg;
   11129 	}
   11130 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11131 		/* All PCH* use _hv_ */
   11132 		new_readreg = wm_gmii_hv_readreg;
   11133 		new_writereg = wm_gmii_hv_writereg;
   11134 	}
   11135 
   11136 	/* Diag output */
   11137 	if (dodiag) {
   11138 		if (doubt_phytype != WMPHY_UNKNOWN)
   11139 			aprint_error_dev(dev, "Assumed new PHY type was "
   11140 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11141 			    new_phytype);
   11142 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11143 		    && (sc->sc_phytype != new_phytype))
   11144 			aprint_error_dev(dev, "Previously assumed PHY type "
   11145 			    "(%u) was incorrect. New PHY type = %u\n",
   11146 			    sc->sc_phytype, new_phytype);
   11147 
   11148 		if ((mii->mii_readreg != NULL) &&
   11149 		    (new_phytype == WMPHY_UNKNOWN))
   11150 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11151 
   11152 		if ((mii->mii_readreg != NULL) &&
   11153 		    (mii->mii_readreg != new_readreg))
   11154 			aprint_error_dev(dev, "Previously assumed PHY "
   11155 			    "read/write function was incorrect.\n");
   11156 	}
   11157 
   11158 	/* Update now */
   11159 	sc->sc_phytype = new_phytype;
   11160 	mii->mii_readreg = new_readreg;
   11161 	mii->mii_writereg = new_writereg;
   11162 	if (new_readreg == wm_gmii_hv_readreg) {
   11163 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11164 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11165 	} else if (new_readreg == wm_sgmii_readreg) {
   11166 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11167 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11168 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11169 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11170 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11171 	}
   11172 }
   11173 
   11174 /*
   11175  * wm_get_phy_id_82575:
   11176  *
   11177  * Return PHY ID. Return -1 if it failed.
   11178  */
   11179 static int
   11180 wm_get_phy_id_82575(struct wm_softc *sc)
   11181 {
   11182 	uint32_t reg;
   11183 	int phyid = -1;
   11184 
   11185 	/* XXX */
   11186 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11187 		return -1;
   11188 
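         	/*
         	 * When SGMII uses MDIO, the PHY address is taken from the
         	 * MDIC register (82575/82576) or the MDICNFG register (82580
         	 * and newer).
         	 */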
   11189 	if (wm_sgmii_uses_mdio(sc)) {
   11190 		switch (sc->sc_type) {
   11191 		case WM_T_82575:
   11192 		case WM_T_82576:
   11193 			reg = CSR_READ(sc, WMREG_MDIC);
   11194 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11195 			break;
   11196 		case WM_T_82580:
   11197 		case WM_T_I350:
   11198 		case WM_T_I354:
   11199 		case WM_T_I210:
   11200 		case WM_T_I211:
   11201 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11202 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11203 			break;
   11204 		default:
   11205 			return -1;
   11206 		}
   11207 	}
   11208 
   11209 	return phyid;
   11210 }
   11211 
   11212 /*
   11213  * wm_gmii_mediainit:
   11214  *
   11215  *	Initialize media for use on 1000BASE-T devices.
   11216  */
   11217 static void
   11218 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11219 {
   11220 	device_t dev = sc->sc_dev;
   11221 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11222 	struct mii_data *mii = &sc->sc_mii;
   11223 
   11224 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11225 		device_xname(sc->sc_dev), __func__));
   11226 
   11227 	/* We have GMII. */
   11228 	sc->sc_flags |= WM_F_HAS_MII;
   11229 
   11230 	if (sc->sc_type == WM_T_80003)
   11231 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11232 	else
   11233 		sc->sc_tipg = TIPG_1000T_DFLT;
   11234 
   11235 	/*
   11236 	 * Let the chip set speed/duplex on its own based on
   11237 	 * signals from the PHY.
   11238 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11239 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11240 	 */
   11241 	sc->sc_ctrl |= CTRL_SLU;
   11242 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11243 
   11244 	/* Initialize our media structures and probe the GMII. */
   11245 	mii->mii_ifp = ifp;
   11246 
   11247 	mii->mii_statchg = wm_gmii_statchg;
   11248 
   11249 	/* Switch PHY control from SMBus to PCIe */
   11250 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11251 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11252 	    || (sc->sc_type == WM_T_PCH_CNP))
   11253 		wm_init_phy_workarounds_pchlan(sc);
   11254 
   11255 	wm_gmii_reset(sc);
   11256 
   11257 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11258 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11259 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11260 
   11261 	/* Setup internal SGMII PHY for SFP */
   11262 	wm_sgmii_sfp_preconfig(sc);
   11263 
   11264 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11265 	    || (sc->sc_type == WM_T_82580)
   11266 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11267 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11268 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11269 			/* Attach only one port */
   11270 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11271 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11272 		} else {
   11273 			int i, id;
   11274 			uint32_t ctrl_ext;
   11275 
   11276 			id = wm_get_phy_id_82575(sc);
   11277 			if (id != -1) {
   11278 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11279 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11280 			}
   11281 			if ((id == -1)
   11282 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11283 				/* Power on the SGMII PHY if it is disabled */
   11284 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11285 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11286 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11287 				CSR_WRITE_FLUSH(sc);
   11288 				delay(300*1000); /* XXX too long */
   11289 
   11290 				/*
   11291 				 * Scan PHY addresses from 1 to 7.
   11292 				 *
   11293 				 * I2C access can fail with the I2C register's
   11294 				 * ERROR bit set, so suppress error messages
   11295 				 * while scanning.
   11296 				 */
   11297 				sc->phy.no_errprint = true;
   11298 				for (i = 1; i < 8; i++)
   11299 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11300 					    0xffffffff, i, MII_OFFSET_ANY,
   11301 					    MIIF_DOPAUSE);
   11302 				sc->phy.no_errprint = false;
   11303 
   11304 				/* Restore previous sfp cage power state */
   11305 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11306 			}
   11307 		}
   11308 	} else
   11309 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11310 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11311 
   11312 	/*
   11313 	 * If the MAC is PCH2 or newer and failed to detect an MII PHY, call
   11314 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   11315 	 */
   11316 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11317 		|| (sc->sc_type == WM_T_PCH_SPT)
   11318 		|| (sc->sc_type == WM_T_PCH_CNP))
   11319 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11320 		wm_set_mdio_slow_mode_hv(sc);
   11321 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11322 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11323 	}
   11324 
   11325 	/*
   11326 	 * (For ICH8 variants)
   11327 	 * If PHY detection failed, use BM's r/w function and retry.
   11328 	 */
   11329 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11330 		/* if failed, retry with *_bm_* */
   11331 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11332 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11333 		    sc->sc_phytype);
   11334 		sc->sc_phytype = WMPHY_BM;
   11335 		mii->mii_readreg = wm_gmii_bm_readreg;
   11336 		mii->mii_writereg = wm_gmii_bm_writereg;
   11337 
   11338 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11339 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11340 	}
   11341 
   11342 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11343 		/* No PHY was found */
   11344 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11345 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11346 		sc->sc_phytype = WMPHY_NONE;
   11347 	} else {
   11348 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11349 
   11350 		/*
   11351 		 * PHY found! Check PHY type again by the second call of
   11352 		 * wm_gmii_setup_phytype.
   11353 		 */
   11354 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11355 		    child->mii_mpd_model);
   11356 
   11357 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11358 	}
   11359 }
   11360 
   11361 /*
   11362  * wm_gmii_mediachange:	[ifmedia interface function]
   11363  *
   11364  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11365  */
   11366 static int
   11367 wm_gmii_mediachange(struct ifnet *ifp)
   11368 {
   11369 	struct wm_softc *sc = ifp->if_softc;
   11370 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11371 	uint32_t reg;
   11372 	int rc;
   11373 
   11374 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11375 		device_xname(sc->sc_dev), __func__));
   11376 
   11377 	KASSERT(mutex_owned(sc->sc_core_lock));
   11378 
   11379 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11380 		return 0;
   11381 
   11382 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11383 	if ((sc->sc_type == WM_T_82580)
   11384 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11385 	    || (sc->sc_type == WM_T_I211)) {
   11386 		reg = CSR_READ(sc, WMREG_PHPM);
   11387 		reg &= ~PHPM_GO_LINK_D;
   11388 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11389 	}
   11390 
   11391 	/* Disable D0 LPLU. */
   11392 	wm_lplu_d0_disable(sc);
   11393 
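         	/*
         	 * Let the MAC detect speed/duplex itself when autonegotiating
         	 * (or on anything newer than the 82543); otherwise force them
         	 * from the selected media below.
         	 */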
   11394 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11395 	sc->sc_ctrl |= CTRL_SLU;
   11396 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11397 	    || (sc->sc_type > WM_T_82543)) {
   11398 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11399 	} else {
   11400 		sc->sc_ctrl &= ~CTRL_ASDE;
   11401 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11402 		if (ife->ifm_media & IFM_FDX)
   11403 			sc->sc_ctrl |= CTRL_FD;
   11404 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11405 		case IFM_10_T:
   11406 			sc->sc_ctrl |= CTRL_SPEED_10;
   11407 			break;
   11408 		case IFM_100_TX:
   11409 			sc->sc_ctrl |= CTRL_SPEED_100;
   11410 			break;
   11411 		case IFM_1000_T:
   11412 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11413 			break;
   11414 		case IFM_NONE:
   11415 			/* There is no specific setting for IFM_NONE */
   11416 			break;
   11417 		default:
   11418 			panic("wm_gmii_mediachange: bad media 0x%x",
   11419 			    ife->ifm_media);
   11420 		}
   11421 	}
   11422 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11423 	CSR_WRITE_FLUSH(sc);
   11424 
   11425 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11426 		wm_serdes_mediachange(ifp);
   11427 
   11428 	if (sc->sc_type <= WM_T_82543)
   11429 		wm_gmii_reset(sc);
   11430 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11431 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   11432 		/* Allow time for the SFP cage to power up the PHY */
   11433 		delay(300 * 1000);
   11434 		wm_gmii_reset(sc);
   11435 	}
   11436 
   11437 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11438 		return 0;
   11439 	return rc;
   11440 }
   11441 
   11442 /*
   11443  * wm_gmii_mediastatus:	[ifmedia interface function]
   11444  *
   11445  *	Get the current interface media status on a 1000BASE-T device.
   11446  */
   11447 static void
   11448 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11449 {
   11450 	struct wm_softc *sc = ifp->if_softc;
   11451 
   11452 	KASSERT(mutex_owned(sc->sc_core_lock));
   11453 
   11454 	ether_mediastatus(ifp, ifmr);
   11455 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11456 	    | sc->sc_flowflags;
   11457 }
   11458 
   11459 #define	MDI_IO		CTRL_SWDPIN(2)
   11460 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11461 #define	MDI_CLK		CTRL_SWDPIN(3)
   11462 
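         /*
          * The i82543 has no MDIC register; the MII is bit-banged through
          * software-controlled pins in the CTRL register. MDI_IO carries
          * the data bit, MDI_CLK is toggled by the host to clock each bit
          * in or out, and MDI_DIR sets the pin direction (host -> PHY).
          */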
   11463 static void
   11464 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11465 {
   11466 	uint32_t i, v;
   11467 
   11468 	v = CSR_READ(sc, WMREG_CTRL);
   11469 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11470 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11471 
   11472 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11473 		if (data & i)
   11474 			v |= MDI_IO;
   11475 		else
   11476 			v &= ~MDI_IO;
   11477 		CSR_WRITE(sc, WMREG_CTRL, v);
   11478 		CSR_WRITE_FLUSH(sc);
   11479 		delay(10);
   11480 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11481 		CSR_WRITE_FLUSH(sc);
   11482 		delay(10);
   11483 		CSR_WRITE(sc, WMREG_CTRL, v);
   11484 		CSR_WRITE_FLUSH(sc);
   11485 		delay(10);
   11486 	}
   11487 }
   11488 
   11489 static uint16_t
   11490 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11491 {
   11492 	uint32_t v, i;
   11493 	uint16_t data = 0;
   11494 
   11495 	v = CSR_READ(sc, WMREG_CTRL);
   11496 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11497 	v |= CTRL_SWDPIO(3);
   11498 
   11499 	CSR_WRITE(sc, WMREG_CTRL, v);
   11500 	CSR_WRITE_FLUSH(sc);
   11501 	delay(10);
   11502 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11503 	CSR_WRITE_FLUSH(sc);
   11504 	delay(10);
   11505 	CSR_WRITE(sc, WMREG_CTRL, v);
   11506 	CSR_WRITE_FLUSH(sc);
   11507 	delay(10);
   11508 
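         	/*
         	 * Shift in 16 data bits, MSB first: raise MDI_CLK, sample
         	 * MDI_IO while the clock is high, then lower the clock again.
         	 */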
   11509 	for (i = 0; i < 16; i++) {
   11510 		data <<= 1;
   11511 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11512 		CSR_WRITE_FLUSH(sc);
   11513 		delay(10);
   11514 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11515 			data |= 1;
   11516 		CSR_WRITE(sc, WMREG_CTRL, v);
   11517 		CSR_WRITE_FLUSH(sc);
   11518 		delay(10);
   11519 	}
   11520 
   11521 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11522 	CSR_WRITE_FLUSH(sc);
   11523 	delay(10);
   11524 	CSR_WRITE(sc, WMREG_CTRL, v);
   11525 	CSR_WRITE_FLUSH(sc);
   11526 	delay(10);
   11527 
   11528 	return data;
   11529 }
   11530 
   11531 #undef MDI_IO
   11532 #undef MDI_DIR
   11533 #undef MDI_CLK
   11534 
   11535 /*
   11536  * wm_gmii_i82543_readreg:	[mii interface function]
   11537  *
   11538  *	Read a PHY register on the GMII (i82543 version).
   11539  */
   11540 static int
   11541 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11542 {
   11543 	struct wm_softc *sc = device_private(dev);
   11544 
   11545 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11546 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11547 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11548 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11549 
   11550 	DPRINTF(sc, WM_DEBUG_GMII,
   11551 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11552 		device_xname(dev), phy, reg, *val));
   11553 
   11554 	return 0;
   11555 }
   11556 
   11557 /*
   11558  * wm_gmii_i82543_writereg:	[mii interface function]
   11559  *
   11560  *	Write a PHY register on the GMII (i82543 version).
   11561  */
   11562 static int
   11563 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11564 {
   11565 	struct wm_softc *sc = device_private(dev);
   11566 
   11567 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11568 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11569 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11570 	    (MII_COMMAND_START << 30), 32);
   11571 
   11572 	return 0;
   11573 }
   11574 
   11575 /*
   11576  * wm_gmii_mdic_readreg:	[mii interface function]
   11577  *
   11578  *	Read a PHY register on the GMII.
   11579  */
   11580 static int
   11581 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11582 {
   11583 	struct wm_softc *sc = device_private(dev);
   11584 	uint32_t mdic = 0;
   11585 	int i;
   11586 
   11587 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11588 	    && (reg > MII_ADDRMASK)) {
   11589 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11590 		    __func__, sc->sc_phytype, reg);
   11591 		reg &= MII_ADDRMASK;
   11592 	}
   11593 
   11594 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11595 	    MDIC_REGADD(reg));
   11596 
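         	/* Poll the READY bit; each iteration waits 50us. */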
   11597 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11598 		delay(50);
   11599 		mdic = CSR_READ(sc, WMREG_MDIC);
   11600 		if (mdic & MDIC_READY)
   11601 			break;
   11602 	}
   11603 
   11604 	if ((mdic & MDIC_READY) == 0) {
   11605 		DPRINTF(sc, WM_DEBUG_GMII,
   11606 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11607 			device_xname(dev), phy, reg));
   11608 		return ETIMEDOUT;
   11609 	} else if (mdic & MDIC_E) {
   11610 		/* This is normal if no PHY is present. */
   11611 		DPRINTF(sc, WM_DEBUG_GMII,
   11612 		    ("%s: MDIC read error: phy %d reg %d\n",
   11613 			device_xname(sc->sc_dev), phy, reg));
   11614 		return -1;
   11615 	} else
   11616 		*val = MDIC_DATA(mdic);
   11617 
   11618 	/*
   11619 	 * Allow some time after each MDIC transaction to avoid
   11620 	 * reading duplicate data in the next MDIC transaction.
   11621 	 */
   11622 	if (sc->sc_type == WM_T_PCH2)
   11623 		delay(100);
   11624 
   11625 	return 0;
   11626 }
   11627 
   11628 /*
   11629  * wm_gmii_mdic_writereg:	[mii interface function]
   11630  *
   11631  *	Write a PHY register on the GMII.
   11632  */
   11633 static int
   11634 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11635 {
   11636 	struct wm_softc *sc = device_private(dev);
   11637 	uint32_t mdic = 0;
   11638 	int i;
   11639 
   11640 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11641 	    && (reg > MII_ADDRMASK)) {
   11642 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11643 		    __func__, sc->sc_phytype, reg);
   11644 		reg &= MII_ADDRMASK;
   11645 	}
   11646 
   11647 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11648 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11649 
   11650 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11651 		delay(50);
   11652 		mdic = CSR_READ(sc, WMREG_MDIC);
   11653 		if (mdic & MDIC_READY)
   11654 			break;
   11655 	}
   11656 
   11657 	if ((mdic & MDIC_READY) == 0) {
   11658 		DPRINTF(sc, WM_DEBUG_GMII,
   11659 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11660 			device_xname(dev), phy, reg));
   11661 		return ETIMEDOUT;
   11662 	} else if (mdic & MDIC_E) {
   11663 		DPRINTF(sc, WM_DEBUG_GMII,
   11664 		    ("%s: MDIC write error: phy %d reg %d\n",
   11665 			device_xname(dev), phy, reg));
   11666 		return -1;
   11667 	}
   11668 
   11669 	/*
   11670 	 * Allow some time after each MDIC transaction to avoid
   11671 	 * reading duplicate data in the next MDIC transaction.
   11672 	 */
   11673 	if (sc->sc_type == WM_T_PCH2)
   11674 		delay(100);
   11675 
   11676 	return 0;
   11677 }
   11678 
   11679 /*
   11680  * wm_gmii_i82544_readreg:	[mii interface function]
   11681  *
   11682  *	Read a PHY register on the GMII.
   11683  */
   11684 static int
   11685 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11686 {
   11687 	struct wm_softc *sc = device_private(dev);
   11688 	int rv;
   11689 
   11690 	rv = sc->phy.acquire(sc);
   11691 	if (rv != 0) {
   11692 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11693 		return rv;
   11694 	}
   11695 
   11696 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11697 
   11698 	sc->phy.release(sc);
   11699 
   11700 	return rv;
   11701 }
   11702 
   11703 static int
   11704 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11705 {
   11706 	struct wm_softc *sc = device_private(dev);
   11707 	int rv;
   11708 
   11709 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11710 		switch (sc->sc_phytype) {
   11711 		case WMPHY_IGP:
   11712 		case WMPHY_IGP_2:
   11713 		case WMPHY_IGP_3:
   11714 			rv = wm_gmii_mdic_writereg(dev, phy,
   11715 			    IGPHY_PAGE_SELECT, reg);
   11716 			if (rv != 0)
   11717 				return rv;
   11718 			break;
   11719 		default:
   11720 #ifdef WM_DEBUG
   11721 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11722 			    __func__, sc->sc_phytype, reg);
   11723 #endif
   11724 			break;
   11725 		}
   11726 	}
   11727 
   11728 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11729 }
   11730 
   11731 /*
   11732  * wm_gmii_i82544_writereg:	[mii interface function]
   11733  *
   11734  *	Write a PHY register on the GMII.
   11735  */
   11736 static int
   11737 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11738 {
   11739 	struct wm_softc *sc = device_private(dev);
   11740 	int rv;
   11741 
   11742 	rv = sc->phy.acquire(sc);
   11743 	if (rv != 0) {
   11744 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11745 		return rv;
   11746 	}
   11747 
   11748 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   11749 	sc->phy.release(sc);
   11750 
   11751 	return rv;
   11752 }
   11753 
   11754 static int
   11755 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11756 {
   11757 	struct wm_softc *sc = device_private(dev);
   11758 	int rv;
   11759 
   11760 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11761 		switch (sc->sc_phytype) {
   11762 		case WMPHY_IGP:
   11763 		case WMPHY_IGP_2:
   11764 		case WMPHY_IGP_3:
   11765 			rv = wm_gmii_mdic_writereg(dev, phy,
   11766 			    IGPHY_PAGE_SELECT, reg);
   11767 			if (rv != 0)
   11768 				return rv;
   11769 			break;
   11770 		default:
   11771 #ifdef WM_DEBUG
   11772 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11773 			    __func__, sc->sc_phytype, reg);
   11774 #endif
   11775 			break;
   11776 		}
   11777 	}
   11778 
   11779 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11780 }
   11781 
   11782 /*
   11783  * wm_gmii_i80003_readreg:	[mii interface function]
   11784  *
   11785  *	Read a PHY register on the kumeran
   11786  * This could be handled by the PHY layer if we didn't have to lock the
   11787  * resource ...
   11788  */
   11789 static int
   11790 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11791 {
   11792 	struct wm_softc *sc = device_private(dev);
   11793 	int page_select;
   11794 	uint16_t temp, temp2;
   11795 	int rv;
   11796 
   11797 	if (phy != 1) /* Only one PHY on kumeran bus */
   11798 		return -1;
   11799 
   11800 	rv = sc->phy.acquire(sc);
   11801 	if (rv != 0) {
   11802 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11803 		return rv;
   11804 	}
   11805 
   11806 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11807 		page_select = GG82563_PHY_PAGE_SELECT;
   11808 	else {
   11809 		/*
   11810 		 * Use Alternative Page Select register to access registers
   11811 		 * 30 and 31.
   11812 		 */
   11813 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11814 	}
   11815 	temp = reg >> GG82563_PAGE_SHIFT;
   11816 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11817 		goto out;
   11818 
   11819 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11820 		/*
   11821 		 * Wait an extra 200us to work around a bug with the ready
   11822 		 * bit in the MDIC register.
   11823 		 */
   11824 		delay(200);
   11825 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11826 		if ((rv != 0) || (temp2 != temp)) {
   11827 			device_printf(dev, "%s failed\n", __func__);
   11828 			rv = -1;
   11829 			goto out;
   11830 		}
   11831 		delay(200);
   11832 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11833 		delay(200);
   11834 	} else
   11835 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11836 
   11837 out:
   11838 	sc->phy.release(sc);
   11839 	return rv;
   11840 }
   11841 
   11842 /*
   11843  * wm_gmii_i80003_writereg:	[mii interface function]
   11844  *
   11845  *	Write a PHY register on the kumeran.
   11846  * This could be handled by the PHY layer if we didn't have to lock the
   11847  * resource ...
   11848  */
   11849 static int
   11850 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11851 {
   11852 	struct wm_softc *sc = device_private(dev);
   11853 	int page_select, rv;
   11854 	uint16_t temp, temp2;
   11855 
   11856 	if (phy != 1) /* Only one PHY on kumeran bus */
   11857 		return -1;
   11858 
   11859 	rv = sc->phy.acquire(sc);
   11860 	if (rv != 0) {
   11861 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11862 		return rv;
   11863 	}
   11864 
   11865 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11866 		page_select = GG82563_PHY_PAGE_SELECT;
   11867 	else {
   11868 		/*
   11869 		 * Use Alternative Page Select register to access registers
   11870 		 * 30 and 31.
   11871 		 */
   11872 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11873 	}
   11874 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11875 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11876 		goto out;
   11877 
   11878 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11879 		/*
   11880 		 * Wait an extra 200us to work around a bug with the ready
   11881 		 * bit in the MDIC register.
   11882 		 */
   11883 		delay(200);
   11884 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11885 		if ((rv != 0) || (temp2 != temp)) {
   11886 			device_printf(dev, "%s failed\n", __func__);
   11887 			rv = -1;
   11888 			goto out;
   11889 		}
   11890 		delay(200);
   11891 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11892 		delay(200);
   11893 	} else
   11894 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11895 
   11896 out:
   11897 	sc->phy.release(sc);
   11898 	return rv;
   11899 }
   11900 
   11901 /*
   11902  * wm_gmii_bm_readreg:	[mii interface function]
   11903  *
   11904  *	Read a PHY register on the BM PHY.
   11905  * This could be handled by the PHY layer if we didn't have to lock the
   11906  * resource ...
   11907  */
   11908 static int
   11909 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11910 {
   11911 	struct wm_softc *sc = device_private(dev);
   11912 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11913 	int rv;
   11914 
   11915 	rv = sc->phy.acquire(sc);
   11916 	if (rv != 0) {
   11917 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11918 		return rv;
   11919 	}
   11920 
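         	/*
         	 * Page select, port control and wakeup registers are only
         	 * reachable through PHY address 1, so redirect accesses to
         	 * them there.
         	 */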
   11921 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11922 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11923 		    || (reg == 31)) ? 1 : phy;
   11924 	/* Page 800 works differently than the rest so it has its own func */
   11925 	if (page == BM_WUC_PAGE) {
   11926 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11927 		goto release;
   11928 	}
   11929 
   11930 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11931 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11932 		    && (sc->sc_type != WM_T_82583))
   11933 			rv = wm_gmii_mdic_writereg(dev, phy,
   11934 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11935 		else
   11936 			rv = wm_gmii_mdic_writereg(dev, phy,
   11937 			    BME1000_PHY_PAGE_SELECT, page);
   11938 		if (rv != 0)
   11939 			goto release;
   11940 	}
   11941 
   11942 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11943 
   11944 release:
   11945 	sc->phy.release(sc);
   11946 	return rv;
   11947 }
   11948 
   11949 /*
   11950  * wm_gmii_bm_writereg:	[mii interface function]
   11951  *
   11952  *	Write a PHY register on the BM PHY.
   11953  * This could be handled by the PHY layer if we didn't have to lock the
   11954  * resource ...
   11955  */
   11956 static int
   11957 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11958 {
   11959 	struct wm_softc *sc = device_private(dev);
   11960 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11961 	int rv;
   11962 
   11963 	rv = sc->phy.acquire(sc);
   11964 	if (rv != 0) {
   11965 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11966 		return rv;
   11967 	}
   11968 
   11969 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11970 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11971 		    || (reg == 31)) ? 1 : phy;
   11972 	/* Page 800 works differently than the rest so it has its own func */
   11973 	if (page == BM_WUC_PAGE) {
   11974 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11975 		goto release;
   11976 	}
   11977 
   11978 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11979 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11980 		    && (sc->sc_type != WM_T_82583))
   11981 			rv = wm_gmii_mdic_writereg(dev, phy,
   11982 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11983 		else
   11984 			rv = wm_gmii_mdic_writereg(dev, phy,
   11985 			    BME1000_PHY_PAGE_SELECT, page);
   11986 		if (rv != 0)
   11987 			goto release;
   11988 	}
   11989 
   11990 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11991 
   11992 release:
   11993 	sc->phy.release(sc);
   11994 	return rv;
   11995 }
   11996 
   11997 /*
   11998  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11999  *  @dev: pointer to the HW structure
   12000  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   12001  *
   12002  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12003  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12004  */
   12005 static int
   12006 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12007 {
   12008 #ifdef WM_DEBUG
   12009 	struct wm_softc *sc = device_private(dev);
   12010 #endif
   12011 	uint16_t temp;
   12012 	int rv;
   12013 
   12014 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12015 		device_xname(dev), __func__));
   12016 
   12017 	if (!phy_regp)
   12018 		return -1;
   12019 
   12020 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12021 
   12022 	/* Select Port Control Registers page */
   12023 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12024 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12025 	if (rv != 0)
   12026 		return rv;
   12027 
   12028 	/* Read WUCE and save it */
   12029 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12030 	if (rv != 0)
   12031 		return rv;
   12032 
   12033 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12034 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12035 	 */
   12036 	temp = *phy_regp;
   12037 	temp |= BM_WUC_ENABLE_BIT;
   12038 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12039 
   12040 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12041 		return rv;
   12042 
   12043 	/* Select Host Wakeup Registers page - caller now able to write
   12044 	 * registers on the Wakeup registers page
   12045 	 */
   12046 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12047 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12048 }
   12049 
   12050 /*
   12051  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12052  *  @dev: pointer to the HW structure
   12053  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12054  *
   12055  *  Restore BM_WUC_ENABLE_REG to its original value.
   12056  *
   12057  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12058  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12059  *  caller.
   12060  */
   12061 static int
   12062 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12063 {
   12064 #ifdef WM_DEBUG
   12065 	struct wm_softc *sc = device_private(dev);
   12066 #endif
   12067 
   12068 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12069 		device_xname(dev), __func__));
   12070 
   12071 	if (!phy_regp)
   12072 		return -1;
   12073 
   12074 	/* Select Port Control Registers page */
   12075 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12076 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12077 
   12078 	/* Restore 769.17 to its original value */
   12079 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12080 
   12081 	return 0;
   12082 }
   12083 
   12084 /*
   12085  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12086  *  @sc: pointer to the HW structure
   12087  *  @offset: register offset to be read or written
   12088  *  @val: pointer to the data to read or write
   12089  *  @rd: determines if operation is read or write
   12090  *  @page_set: BM_WUC_PAGE already set and access enabled
   12091  *
   12092  *  Read the PHY register at offset and store the retrieved information in
   12093  *  data, or write data to PHY register at offset.  Note the procedure to
   12094  *  access the PHY wakeup registers is different than reading the other PHY
   12095  *  registers. It works as such:
   12096  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   12097  *  2) Set page to 800 for host (801 if we were manageability)
   12098  *  3) Write the address using the address opcode (0x11)
   12099  *  4) Read or write the data using the data opcode (0x12)
   12100  *  5) Restore 769.17.2 to its original value
   12101  *
   12102  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12103  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12104  *
   12105  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12106  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12107  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12108  */
   12109 static int
   12110 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12111     bool page_set)
   12112 {
   12113 	struct wm_softc *sc = device_private(dev);
   12114 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12115 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12116 	uint16_t wuce;
   12117 	int rv = 0;
   12118 
   12119 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12120 		device_xname(dev), __func__));
   12121 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12122 	if ((sc->sc_type == WM_T_PCH)
   12123 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12124 		device_printf(dev,
   12125 		    "Attempting to access page %d while gig enabled.\n", page);
   12126 	}
   12127 
   12128 	if (!page_set) {
   12129 		/* Enable access to PHY wakeup registers */
   12130 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12131 		if (rv != 0) {
   12132 			device_printf(dev,
   12133 			    "%s: Could not enable PHY wakeup reg access\n",
   12134 			    __func__);
   12135 			return rv;
   12136 		}
   12137 	}
   12138 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12139 		device_xname(sc->sc_dev), __func__, page, regnum));
   12140 
   12141 	/*
   12142 	 * 3) and 4) Access the PHY wakeup register: write the address,
   12143 	 * then read or write the data (see the steps listed above).
   12144 	 */
   12145 
   12146 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12147 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12148 	if (rv != 0)
   12149 		return rv;
   12150 
   12151 	if (rd) {
   12152 		/* Read the Wakeup register page value using opcode 0x12 */
   12153 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12154 	} else {
   12155 		/* Write the Wakeup register page value using opcode 0x12 */
   12156 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12157 	}
   12158 	if (rv != 0)
   12159 		return rv;
   12160 
   12161 	if (!page_set)
   12162 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12163 
   12164 	return rv;
   12165 }
   12166 
   12167 /*
   12168  * wm_gmii_hv_readreg:	[mii interface function]
   12169  *
   12170  *	Read a PHY register on the HV (PCH and newer) PHY.
   12171  * This could be handled by the PHY layer if we didn't have to lock the
   12172  * resource ...
   12173  */
   12174 static int
   12175 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12176 {
   12177 	struct wm_softc *sc = device_private(dev);
   12178 	int rv;
   12179 
   12180 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12181 		device_xname(dev), __func__));
   12182 
   12183 	rv = sc->phy.acquire(sc);
   12184 	if (rv != 0) {
   12185 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12186 		return rv;
   12187 	}
   12188 
   12189 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12190 	sc->phy.release(sc);
   12191 	return rv;
   12192 }
   12193 
   12194 static int
   12195 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12196 {
   12197 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12198 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12199 	int rv;
   12200 
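         	/*
         	 * Pages at or above HV_INTC_FC_PAGE_START are accessed
         	 * through PHY address 1.
         	 */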
   12201 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12202 
   12203 	/* Page 800 works differently than the rest so it has its own func */
   12204 	if (page == BM_WUC_PAGE)
   12205 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12206 
   12207 	/*
   12208 	 * Lower than page 768 works differently than the rest so it has its
   12209 	 * own func
   12210 	 */
   12211 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12212 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12213 		return -1;
   12214 	}
   12215 
   12216 	/*
   12217 	 * XXX I21[789] documents say that the SMBus Address register is at
   12218 	 * PHY address 01, Page 0 (not 768), Register 26.
   12219 	 */
   12220 	if (page == HV_INTC_FC_PAGE_START)
   12221 		page = 0;
   12222 
   12223 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12224 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12225 		    page << BME1000_PAGE_SHIFT);
   12226 		if (rv != 0)
   12227 			return rv;
   12228 	}
   12229 
   12230 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12231 }
   12232 
   12233 /*
   12234  * wm_gmii_hv_writereg:	[mii interface function]
   12235  *
   12236  *	Write a PHY register on the HV (PCH and newer) PHY.
   12237  * This could be handled by the PHY layer if we didn't have to lock the
   12238  * resource ...
   12239  */
   12240 static int
   12241 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12242 {
   12243 	struct wm_softc *sc = device_private(dev);
   12244 	int rv;
   12245 
   12246 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12247 		device_xname(dev), __func__));
   12248 
   12249 	rv = sc->phy.acquire(sc);
   12250 	if (rv != 0) {
   12251 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12252 		return rv;
   12253 	}
   12254 
   12255 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12256 	sc->phy.release(sc);
   12257 
   12258 	return rv;
   12259 }
   12260 
   12261 static int
   12262 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12263 {
   12264 	struct wm_softc *sc = device_private(dev);
   12265 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12266 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12267 	int rv;
   12268 
   12269 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12270 
   12271 	/* Page 800 works differently than the rest so it has its own func */
   12272 	if (page == BM_WUC_PAGE)
   12273 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12274 		    false);
   12275 
   12276 	/*
   12277 	 * Lower than page 768 works differently than the rest so it has its
   12278 	 * own func
   12279 	 */
   12280 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12281 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12282 		return -1;
   12283 	}
   12284 
   12285 	{
   12286 		/*
   12287 		 * XXX I21[789] documents say that the SMBus Address register
   12288 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12289 		 */
   12290 		if (page == HV_INTC_FC_PAGE_START)
   12291 			page = 0;
   12292 
   12293 		/*
   12294 		 * XXX Workaround MDIO accesses being disabled after entering
   12295 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12296 		 * register is set)
   12297 		 */
   12298 		if (sc->sc_phytype == WMPHY_82578) {
   12299 			struct mii_softc *child;
   12300 
   12301 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12302 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12303 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12304 			    && ((val & (1 << 11)) != 0)) {
   12305 				device_printf(dev, "XXX need workaround\n");
   12306 			}
   12307 		}
   12308 
   12309 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12310 			rv = wm_gmii_mdic_writereg(dev, 1,
   12311 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12312 			if (rv != 0)
   12313 				return rv;
   12314 		}
   12315 	}
   12316 
   12317 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12318 }
   12319 
   12320 /*
   12321  * wm_gmii_82580_readreg:	[mii interface function]
   12322  *
   12323  *	Read a PHY register on the 82580 and I350.
   12324  * This could be handled by the PHY layer if we didn't have to lock the
   12325  * resource ...
   12326  */
   12327 static int
   12328 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12329 {
   12330 	struct wm_softc *sc = device_private(dev);
   12331 	int rv;
   12332 
   12333 	rv = sc->phy.acquire(sc);
   12334 	if (rv != 0) {
   12335 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12336 		return rv;
   12337 	}
   12338 
   12339 #ifdef DIAGNOSTIC
   12340 	if (reg > MII_ADDRMASK) {
   12341 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12342 		    __func__, sc->sc_phytype, reg);
   12343 		reg &= MII_ADDRMASK;
   12344 	}
   12345 #endif
   12346 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12347 
   12348 	sc->phy.release(sc);
   12349 	return rv;
   12350 }
   12351 
   12352 /*
   12353  * wm_gmii_82580_writereg:	[mii interface function]
   12354  *
   12355  *	Write a PHY register on the 82580 and I350.
   12356  * This could be handled by the PHY layer if we didn't have to lock the
   12357  * resource ...
   12358  */
   12359 static int
   12360 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12361 {
   12362 	struct wm_softc *sc = device_private(dev);
   12363 	int rv;
   12364 
   12365 	rv = sc->phy.acquire(sc);
   12366 	if (rv != 0) {
   12367 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12368 		return rv;
   12369 	}
   12370 
   12371 #ifdef DIAGNOSTIC
   12372 	if (reg > MII_ADDRMASK) {
   12373 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12374 		    __func__, sc->sc_phytype, reg);
   12375 		reg &= MII_ADDRMASK;
   12376 	}
   12377 #endif
   12378 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12379 
   12380 	sc->phy.release(sc);
   12381 	return rv;
   12382 }
   12383 
   12384 /*
   12385  * wm_gmii_gs40g_readreg:	[mii interface function]
   12386  *
   12387  *	Read a PHY register on the I210 and I211.
   12388  * This could be handled by the PHY layer if we didn't have to lock the
   12389  * resource ...
   12390  */
   12391 static int
   12392 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12393 {
   12394 	struct wm_softc *sc = device_private(dev);
   12395 	int page, offset;
   12396 	int rv;
   12397 
   12398 	/* Acquire semaphore */
   12399 	rv = sc->phy.acquire(sc);
   12400 	if (rv != 0) {
   12401 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12402 		return rv;
   12403 	}
   12404 
   12405 	/* Page select */
   12406 	page = reg >> GS40G_PAGE_SHIFT;
   12407 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12408 	if (rv != 0)
   12409 		goto release;
   12410 
   12411 	/* Read reg */
   12412 	offset = reg & GS40G_OFFSET_MASK;
   12413 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12414 
   12415 release:
   12416 	sc->phy.release(sc);
   12417 	return rv;
   12418 }
   12419 
   12420 /*
   12421  * wm_gmii_gs40g_writereg:	[mii interface function]
   12422  *
   12423  *	Write a PHY register on the I210 and I211.
   12424  * This could be handled by the PHY layer if we didn't have to lock the
   12425  * resource ...
   12426  */
   12427 static int
   12428 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12429 {
   12430 	struct wm_softc *sc = device_private(dev);
   12431 	uint16_t page;
   12432 	int offset, rv;
   12433 
   12434 	/* Acquire semaphore */
   12435 	rv = sc->phy.acquire(sc);
   12436 	if (rv != 0) {
   12437 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12438 		return rv;
   12439 	}
   12440 
   12441 	/* Page select */
   12442 	page = reg >> GS40G_PAGE_SHIFT;
   12443 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12444 	if (rv != 0)
   12445 		goto release;
   12446 
   12447 	/* Write reg */
   12448 	offset = reg & GS40G_OFFSET_MASK;
   12449 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12450 
   12451 release:
   12452 	/* Release semaphore */
   12453 	sc->phy.release(sc);
   12454 	return rv;
   12455 }
   12456 
   12457 /*
   12458  * wm_gmii_statchg:	[mii interface function]
   12459  *
   12460  *	Callback from MII layer when media changes.
   12461  */
   12462 static void
   12463 wm_gmii_statchg(struct ifnet *ifp)
   12464 {
   12465 	struct wm_softc *sc = ifp->if_softc;
   12466 	struct mii_data *mii = &sc->sc_mii;
   12467 
   12468 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12469 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12470 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12471 
   12472 	/* Get flow control negotiation result. */
   12473 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12474 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12475 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12476 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12477 	}
   12478 
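         	/*
         	 * Apply the negotiated flow control: TX pause enables
         	 * CTRL_TFCE and XON frames (FCRTL_XONE), RX pause enables
         	 * CTRL_RFCE.
         	 */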
   12479 	if (sc->sc_flowflags & IFM_FLOW) {
   12480 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12481 			sc->sc_ctrl |= CTRL_TFCE;
   12482 			sc->sc_fcrtl |= FCRTL_XONE;
   12483 		}
   12484 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12485 			sc->sc_ctrl |= CTRL_RFCE;
   12486 	}
   12487 
   12488 	if (mii->mii_media_active & IFM_FDX) {
   12489 		DPRINTF(sc, WM_DEBUG_LINK,
   12490 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12491 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12492 	} else {
   12493 		DPRINTF(sc, WM_DEBUG_LINK,
   12494 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12495 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12496 	}
   12497 
   12498 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12499 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12500 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12501 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12502 	if (sc->sc_type == WM_T_80003) {
   12503 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12504 		case IFM_1000_T:
   12505 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12506 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12507 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12508 			break;
   12509 		default:
   12510 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12511 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12513 			break;
   12514 		}
   12515 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12516 	}
   12517 }
   12518 
   12519 /* kumeran related (80003, ICH* and PCH*) */
   12520 
   12521 /*
   12522  * wm_kmrn_readreg:
   12523  *
   12524  *	Read a kumeran register
   12525  */
   12526 static int
   12527 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12528 {
   12529 	int rv;
   12530 
   12531 	if (sc->sc_type == WM_T_80003)
   12532 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12533 	else
   12534 		rv = sc->phy.acquire(sc);
   12535 	if (rv != 0) {
   12536 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12537 		    __func__);
   12538 		return rv;
   12539 	}
   12540 
   12541 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12542 
   12543 	if (sc->sc_type == WM_T_80003)
   12544 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12545 	else
   12546 		sc->phy.release(sc);
   12547 
   12548 	return rv;
   12549 }
   12550 
   12551 static int
   12552 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12553 {
   12554 
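	/*
	 * Set the register offset with the read-enable bit, let the
	 * hardware latch the request, then read the result back.
	 */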
   12555 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12556 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12557 	    KUMCTRLSTA_REN);
   12558 	CSR_WRITE_FLUSH(sc);
   12559 	delay(2);
   12560 
   12561 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12562 
   12563 	return 0;
   12564 }
   12565 
   12566 /*
   12567  * wm_kmrn_writereg:
   12568  *
   12569  *	Write a kumeran register
   12570  */
   12571 static int
   12572 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12573 {
   12574 	int rv;
   12575 
   12576 	if (sc->sc_type == WM_T_80003)
   12577 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12578 	else
   12579 		rv = sc->phy.acquire(sc);
   12580 	if (rv != 0) {
   12581 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12582 		    __func__);
   12583 		return rv;
   12584 	}
   12585 
   12586 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12587 
   12588 	if (sc->sc_type == WM_T_80003)
   12589 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12590 	else
   12591 		sc->phy.release(sc);
   12592 
   12593 	return rv;
   12594 }
   12595 
   12596 static int
   12597 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12598 {
   12599 
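	/* A single register access carries both the offset and the data. */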
   12600 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12601 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12602 
   12603 	return 0;
   12604 }
   12605 
   12606 /*
 * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   12608  * This access method is different from IEEE MMD.
   12609  */
   12610 static int
   12611 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12612 {
   12613 	struct wm_softc *sc = device_private(dev);
   12614 	int rv;
   12615 
   12616 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12617 	if (rv != 0)
   12618 		return rv;
   12619 
   12620 	if (rd)
   12621 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12622 	else
   12623 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12624 	return rv;
   12625 }
   12626 
   12627 static int
   12628 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12629 {
   12630 
   12631 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12632 }
   12633 
   12634 static int
   12635 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12636 {
   12637 
   12638 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12639 }
   12640 
   12641 /* SGMII related */
   12642 
   12643 /*
   12644  * wm_sgmii_uses_mdio
   12645  *
   12646  * Check whether the transaction is to the internal PHY or the external
   12647  * MDIO interface. Return true if it's MDIO.
   12648  */
   12649 static bool
   12650 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12651 {
   12652 	uint32_t reg;
   12653 	bool ismdio = false;
   12654 
   12655 	switch (sc->sc_type) {
   12656 	case WM_T_82575:
   12657 	case WM_T_82576:
   12658 		reg = CSR_READ(sc, WMREG_MDIC);
   12659 		ismdio = ((reg & MDIC_DEST) != 0);
   12660 		break;
   12661 	case WM_T_82580:
   12662 	case WM_T_I350:
   12663 	case WM_T_I354:
   12664 	case WM_T_I210:
   12665 	case WM_T_I211:
   12666 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12667 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12668 		break;
   12669 	default:
   12670 		break;
   12671 	}
   12672 
   12673 	return ismdio;
   12674 }
   12675 
   12676 /* Setup internal SGMII PHY for SFP */
   12677 static void
   12678 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12679 {
   12680 	uint16_t id1, id2, phyreg;
   12681 	int i, rv;
   12682 
   12683 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12684 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12685 		return;
   12686 
   12687 	for (i = 0; i < MII_NPHY; i++) {
   12688 		sc->phy.no_errprint = true;
   12689 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12690 		if (rv != 0)
   12691 			continue;
   12692 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12693 		if (rv != 0)
   12694 			continue;
   12695 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12696 			continue;
   12697 		sc->phy.no_errprint = false;
   12698 
   12699 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12700 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12701 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12702 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12703 		break;
   12704 	}
}
   12707 
   12708 /*
   12709  * wm_sgmii_readreg:	[mii interface function]
   12710  *
   12711  *	Read a PHY register on the SGMII
   12712  * This could be handled by the PHY layer if we didn't have to lock the
   12713  * resource ...
   12714  */
   12715 static int
   12716 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12717 {
   12718 	struct wm_softc *sc = device_private(dev);
   12719 	int rv;
   12720 
   12721 	rv = sc->phy.acquire(sc);
   12722 	if (rv != 0) {
   12723 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12724 		return rv;
   12725 	}
   12726 
   12727 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12728 
   12729 	sc->phy.release(sc);
   12730 	return rv;
   12731 }
   12732 
   12733 static int
   12734 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12735 {
   12736 	struct wm_softc *sc = device_private(dev);
   12737 	uint32_t i2ccmd;
   12738 	int i, rv = 0;
   12739 
   12740 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12741 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12742 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12743 
   12744 	/* Poll the ready bit */
   12745 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12746 		delay(50);
   12747 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12748 		if (i2ccmd & I2CCMD_READY)
   12749 			break;
   12750 	}
   12751 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12752 		device_printf(dev, "I2CCMD Read did not complete\n");
   12753 		rv = ETIMEDOUT;
   12754 	}
   12755 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12756 		if (!sc->phy.no_errprint)
   12757 			device_printf(dev, "I2CCMD Error bit set\n");
   12758 		rv = EIO;
   12759 	}
   12760 
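	/* Swap the data bytes for the I2C interface */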
   12761 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12762 
   12763 	return rv;
   12764 }
   12765 
   12766 /*
   12767  * wm_sgmii_writereg:	[mii interface function]
   12768  *
   12769  *	Write a PHY register on the SGMII.
   12770  * This could be handled by the PHY layer if we didn't have to lock the
   12771  * resource ...
   12772  */
   12773 static int
   12774 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12775 {
   12776 	struct wm_softc *sc = device_private(dev);
   12777 	int rv;
   12778 
   12779 	rv = sc->phy.acquire(sc);
   12780 	if (rv != 0) {
   12781 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12782 		return rv;
   12783 	}
   12784 
   12785 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12786 
   12787 	sc->phy.release(sc);
   12788 
   12789 	return rv;
   12790 }
   12791 
   12792 static int
   12793 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12794 {
   12795 	struct wm_softc *sc = device_private(dev);
   12796 	uint32_t i2ccmd;
   12797 	uint16_t swapdata;
   12798 	int rv = 0;
   12799 	int i;
   12800 
   12801 	/* Swap the data bytes for the I2C interface */
   12802 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12803 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12804 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12805 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12806 
   12807 	/* Poll the ready bit */
   12808 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12809 		delay(50);
   12810 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12811 		if (i2ccmd & I2CCMD_READY)
   12812 			break;
   12813 	}
   12814 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12815 		device_printf(dev, "I2CCMD Write did not complete\n");
   12816 		rv = ETIMEDOUT;
   12817 	}
   12818 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12819 		device_printf(dev, "I2CCMD Error bit set\n");
   12820 		rv = EIO;
   12821 	}
   12822 
   12823 	return rv;
   12824 }
   12825 
   12826 /* TBI related */
   12827 
   12828 static bool
   12829 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12830 {
   12831 	bool sig;
   12832 
   12833 	sig = ctrl & CTRL_SWDPIN(1);
   12834 
   12835 	/*
   12836 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12837 	 * detect a signal, 1 if they don't.
   12838 	 */
   12839 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12840 		sig = !sig;
   12841 
   12842 	return sig;
   12843 }
   12844 
   12845 /*
   12846  * wm_tbi_mediainit:
   12847  *
   12848  *	Initialize media for use on 1000BASE-X devices.
   12849  */
   12850 static void
   12851 wm_tbi_mediainit(struct wm_softc *sc)
   12852 {
   12853 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12854 	const char *sep = "";
   12855 
   12856 	if (sc->sc_type < WM_T_82543)
   12857 		sc->sc_tipg = TIPG_WM_DFLT;
   12858 	else
   12859 		sc->sc_tipg = TIPG_LG_DFLT;
   12860 
   12861 	sc->sc_tbi_serdes_anegticks = 5;
   12862 
   12863 	/* Initialize our media structures */
   12864 	sc->sc_mii.mii_ifp = ifp;
   12865 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12866 
   12867 	ifp->if_baudrate = IF_Gbps(1);
   12868 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12869 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12870 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12871 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12872 		    sc->sc_core_lock);
   12873 	} else {
   12874 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12875 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12876 	}
   12877 
   12878 	/*
   12879 	 * SWD Pins:
   12880 	 *
   12881 	 *	0 = Link LED (output)
   12882 	 *	1 = Loss Of Signal (input)
   12883 	 */
   12884 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12885 
   12886 	/* XXX Perhaps this is only for TBI */
   12887 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12888 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12889 
   12890 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12891 		sc->sc_ctrl &= ~CTRL_LRST;
   12892 
   12893 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12894 
   12895 #define	ADD(ss, mm, dd)							  \
   12896 do {									  \
   12897 	aprint_normal("%s%s", sep, ss);					  \
   12898 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12899 	sep = ", ";							  \
   12900 } while (/*CONSTCOND*/0)
   12901 
   12902 	aprint_normal_dev(sc->sc_dev, "");
   12903 
   12904 	if (sc->sc_type == WM_T_I354) {
   12905 		uint32_t status;
   12906 
   12907 		status = CSR_READ(sc, WMREG_STATUS);
   12908 		if (((status & STATUS_2P5_SKU) != 0)
   12909 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
			    ANAR_X_FD);
   12911 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
			    ANAR_X_FD);
   12913 	} else if (sc->sc_type == WM_T_82545) {
   12914 		/* Only 82545 is LX (XXX except SFP) */
   12915 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12916 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12917 	} else if (sc->sc_sfptype != 0) {
   12918 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12919 		switch (sc->sc_sfptype) {
   12920 		default:
   12921 		case SFF_SFP_ETH_FLAGS_1000SX:
   12922 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12923 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12924 			break;
   12925 		case SFF_SFP_ETH_FLAGS_1000LX:
   12926 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12927 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12928 			break;
   12929 		case SFF_SFP_ETH_FLAGS_1000CX:
   12930 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12931 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12932 			break;
   12933 		case SFF_SFP_ETH_FLAGS_1000T:
   12934 			ADD("1000baseT", IFM_1000_T, 0);
   12935 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12936 			break;
   12937 		case SFF_SFP_ETH_FLAGS_100FX:
   12938 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12939 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12940 			break;
   12941 		}
   12942 	} else {
   12943 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12944 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12945 	}
   12946 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12947 	aprint_normal("\n");
   12948 
   12949 #undef ADD
   12950 
   12951 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12952 }
   12953 
   12954 /*
   12955  * wm_tbi_mediachange:	[ifmedia interface function]
   12956  *
   12957  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12958  */
   12959 static int
   12960 wm_tbi_mediachange(struct ifnet *ifp)
   12961 {
   12962 	struct wm_softc *sc = ifp->if_softc;
   12963 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12964 	uint32_t status, ctrl;
   12965 	bool signal;
   12966 	int i;
   12967 
   12968 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12969 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12970 		/* XXX need some work for >= 82571 and < 82575 */
   12971 		if (sc->sc_type < WM_T_82575)
   12972 			return 0;
   12973 	}
   12974 
   12975 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12976 	    || (sc->sc_type >= WM_T_82575))
   12977 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12978 
   12979 	sc->sc_ctrl &= ~CTRL_LRST;
   12980 	sc->sc_txcw = TXCW_ANE;
   12981 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12982 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12983 	else if (ife->ifm_media & IFM_FDX)
   12984 		sc->sc_txcw |= TXCW_FD;
   12985 	else
   12986 		sc->sc_txcw |= TXCW_HD;
   12987 
   12988 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12989 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12990 
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: sc_txcw = 0x%x after autoneg check\n",
		device_xname(sc->sc_dev), sc->sc_txcw));
   12993 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12994 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12995 	CSR_WRITE_FLUSH(sc);
   12996 	delay(1000);
   12997 
   12998 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12999 	signal = wm_tbi_havesignal(sc, ctrl);
   13000 
   13001 	DPRINTF(sc, WM_DEBUG_LINK,
   13002 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13003 
   13004 	if (signal) {
   13005 		/* Have signal; wait for the link to come up. */
   13006 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13007 			delay(10000);
   13008 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13009 				break;
   13010 		}
   13011 
   13012 		DPRINTF(sc, WM_DEBUG_LINK,
   13013 		    ("%s: i = %d after waiting for link\n",
   13014 			device_xname(sc->sc_dev), i));
   13015 
   13016 		status = CSR_READ(sc, WMREG_STATUS);
   13017 		DPRINTF(sc, WM_DEBUG_LINK,
   13018 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13019 			__PRIxBIT "\n",
   13020 			device_xname(sc->sc_dev), status, STATUS_LU));
   13021 		if (status & STATUS_LU) {
   13022 			/* Link is up. */
   13023 			DPRINTF(sc, WM_DEBUG_LINK,
   13024 			    ("%s: LINK: set media -> link up %s\n",
   13025 				device_xname(sc->sc_dev),
   13026 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13027 
   13028 			/*
   13029 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   13030 			 * so we should update sc->sc_ctrl
   13031 			 */
   13032 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13033 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13034 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13035 			if (status & STATUS_FD)
   13036 				sc->sc_tctl |=
   13037 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13038 			else
   13039 				sc->sc_tctl |=
   13040 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13041 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13042 				sc->sc_fcrtl |= FCRTL_XONE;
   13043 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13044 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13045 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13046 			sc->sc_tbi_linkup = 1;
   13047 		} else {
   13048 			if (i == WM_LINKUP_TIMEOUT)
   13049 				wm_check_for_link(sc);
   13050 			/* Link is down. */
   13051 			DPRINTF(sc, WM_DEBUG_LINK,
   13052 			    ("%s: LINK: set media -> link down\n",
   13053 				device_xname(sc->sc_dev)));
   13054 			sc->sc_tbi_linkup = 0;
   13055 		}
   13056 	} else {
   13057 		DPRINTF(sc, WM_DEBUG_LINK,
   13058 		    ("%s: LINK: set media -> no signal\n",
   13059 			device_xname(sc->sc_dev)));
   13060 		sc->sc_tbi_linkup = 0;
   13061 	}
   13062 
   13063 	wm_tbi_serdes_set_linkled(sc);
   13064 
   13065 	return 0;
   13066 }
   13067 
   13068 /*
   13069  * wm_tbi_mediastatus:	[ifmedia interface function]
   13070  *
   13071  *	Get the current interface media status on a 1000BASE-X device.
   13072  */
   13073 static void
   13074 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13075 {
   13076 	struct wm_softc *sc = ifp->if_softc;
   13077 	uint32_t ctrl, status;
   13078 
   13079 	ifmr->ifm_status = IFM_AVALID;
   13080 	ifmr->ifm_active = IFM_ETHER;
   13081 
   13082 	status = CSR_READ(sc, WMREG_STATUS);
   13083 	if ((status & STATUS_LU) == 0) {
   13084 		ifmr->ifm_active |= IFM_NONE;
   13085 		return;
   13086 	}
   13087 
   13088 	ifmr->ifm_status |= IFM_ACTIVE;
   13089 	/* Only 82545 is LX */
   13090 	if (sc->sc_type == WM_T_82545)
   13091 		ifmr->ifm_active |= IFM_1000_LX;
   13092 	else
   13093 		ifmr->ifm_active |= IFM_1000_SX;
   13094 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13095 		ifmr->ifm_active |= IFM_FDX;
   13096 	else
   13097 		ifmr->ifm_active |= IFM_HDX;
   13098 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13099 	if (ctrl & CTRL_RFCE)
   13100 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13101 	if (ctrl & CTRL_TFCE)
   13102 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13103 }
   13104 
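/*
 * wm_check_for_link:
 *
 *	Check for link on TBI devices. Depending on the signal, link-up
 *	and RXCW bits, force the link up or fall back to autonegotiation.
 */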
   13105 /* XXX TBI only */
   13106 static int
   13107 wm_check_for_link(struct wm_softc *sc)
   13108 {
   13109 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13110 	uint32_t rxcw;
   13111 	uint32_t ctrl;
   13112 	uint32_t status;
   13113 	bool signal;
   13114 
   13115 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13116 		device_xname(sc->sc_dev), __func__));
   13117 
   13118 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13119 		/* XXX need some work for >= 82571 */
   13120 		if (sc->sc_type >= WM_T_82571) {
   13121 			sc->sc_tbi_linkup = 1;
   13122 			return 0;
   13123 		}
   13124 	}
   13125 
   13126 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13127 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13128 	status = CSR_READ(sc, WMREG_STATUS);
   13129 	signal = wm_tbi_havesignal(sc, ctrl);
   13130 
   13131 	DPRINTF(sc, WM_DEBUG_LINK,
   13132 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13133 		device_xname(sc->sc_dev), __func__, signal,
   13134 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13135 
   13136 	/*
   13137 	 * SWDPIN   LU RXCW
   13138 	 *	0    0	  0
   13139 	 *	0    0	  1	(should not happen)
   13140 	 *	0    1	  0	(should not happen)
   13141 	 *	0    1	  1	(should not happen)
   13142 	 *	1    0	  0	Disable autonego and force linkup
   13143 	 *	1    0	  1	got /C/ but not linkup yet
   13144 	 *	1    1	  0	(linkup)
   13145 	 *	1    1	  1	If IFM_AUTO, back to autonego
	 */
   13148 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13149 		DPRINTF(sc, WM_DEBUG_LINK,
   13150 		    ("%s: %s: force linkup and fullduplex\n",
   13151 			device_xname(sc->sc_dev), __func__));
   13152 		sc->sc_tbi_linkup = 0;
   13153 		/* Disable auto-negotiation in the TXCW register */
   13154 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13155 
   13156 		/*
   13157 		 * Force link-up and also force full-duplex.
   13158 		 *
		 * NOTE: CTRL's TFCE and RFCE were updated by the hardware
		 * automatically, so we should update sc->sc_ctrl.
   13161 		 */
   13162 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13163 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13164 	} else if (((status & STATUS_LU) != 0)
   13165 	    && ((rxcw & RXCW_C) != 0)
   13166 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13167 		sc->sc_tbi_linkup = 1;
   13168 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13169 			device_xname(sc->sc_dev), __func__));
   13170 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13171 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13172 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   13175 	} else {
   13176 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13177 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13178 			status));
   13179 	}
   13180 
   13181 	return 0;
   13182 }
   13183 
   13184 /*
   13185  * wm_tbi_tick:
   13186  *
   13187  *	Check the link on TBI devices.
   13188  *	This function acts as mii_tick().
   13189  */
   13190 static void
   13191 wm_tbi_tick(struct wm_softc *sc)
   13192 {
   13193 	struct mii_data *mii = &sc->sc_mii;
   13194 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13195 	uint32_t status;
   13196 
   13197 	KASSERT(mutex_owned(sc->sc_core_lock));
   13198 
   13199 	status = CSR_READ(sc, WMREG_STATUS);
   13200 
   13201 	/* XXX is this needed? */
   13202 	(void)CSR_READ(sc, WMREG_RXCW);
   13203 	(void)CSR_READ(sc, WMREG_CTRL);
   13204 
   13205 	/* set link status */
   13206 	if ((status & STATUS_LU) == 0) {
   13207 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13208 			device_xname(sc->sc_dev)));
   13209 		sc->sc_tbi_linkup = 0;
   13210 	} else if (sc->sc_tbi_linkup == 0) {
   13211 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13212 			device_xname(sc->sc_dev),
   13213 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13214 		sc->sc_tbi_linkup = 1;
   13215 		sc->sc_tbi_serdes_ticks = 0;
   13216 	}
   13217 
   13218 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13219 		goto setled;
   13220 
   13221 	if ((status & STATUS_LU) == 0) {
   13222 		sc->sc_tbi_linkup = 0;
   13223 		/* If the timer expired, retry autonegotiation */
   13224 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13225 		    && (++sc->sc_tbi_serdes_ticks
   13226 			>= sc->sc_tbi_serdes_anegticks)) {
   13227 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13228 				device_xname(sc->sc_dev), __func__));
   13229 			sc->sc_tbi_serdes_ticks = 0;
   13230 			/*
   13231 			 * Reset the link, and let autonegotiation do
   13232 			 * its thing
   13233 			 */
   13234 			sc->sc_ctrl |= CTRL_LRST;
   13235 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13236 			CSR_WRITE_FLUSH(sc);
   13237 			delay(1000);
   13238 			sc->sc_ctrl &= ~CTRL_LRST;
   13239 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13240 			CSR_WRITE_FLUSH(sc);
   13241 			delay(1000);
   13242 			CSR_WRITE(sc, WMREG_TXCW,
   13243 			    sc->sc_txcw & ~TXCW_ANE);
   13244 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13245 		}
   13246 	}
   13247 
   13248 setled:
   13249 	wm_tbi_serdes_set_linkled(sc);
   13250 }
   13251 
   13252 /* SERDES related */
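/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES link: enable the PCS and power up the laser.
 */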
   13253 static void
   13254 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13255 {
   13256 	uint32_t reg;
   13257 
   13258 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13259 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13260 		return;
   13261 
   13262 	/* Enable PCS to turn on link */
   13263 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13264 	reg |= PCS_CFG_PCS_EN;
   13265 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13266 
   13267 	/* Power up the laser */
   13268 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13269 	reg &= ~CTRL_EXT_SWDPIN(3);
   13270 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13271 
   13272 	/* Flush the write to verify completion */
   13273 	CSR_WRITE_FLUSH(sc);
   13274 	delay(1000);
   13275 }
   13276 
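/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 */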
   13277 static int
   13278 wm_serdes_mediachange(struct ifnet *ifp)
   13279 {
   13280 	struct wm_softc *sc = ifp->if_softc;
   13281 	bool pcs_autoneg = true; /* XXX */
   13282 	uint32_t ctrl_ext, pcs_lctl, reg;
   13283 
   13284 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13285 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13286 		return 0;
   13287 
   13288 	/* XXX Currently, this function is not called on 8257[12] */
   13289 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13290 	    || (sc->sc_type >= WM_T_82575))
   13291 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13292 
   13293 	/* Power on the sfp cage if present */
   13294 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13295 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13296 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13297 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13298 
   13299 	sc->sc_ctrl |= CTRL_SLU;
   13300 
   13301 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13302 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13303 
   13304 		reg = CSR_READ(sc, WMREG_CONNSW);
   13305 		reg |= CONNSW_ENRGSRC;
   13306 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13307 	}
   13308 
   13309 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13310 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13311 	case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
   13313 		pcs_autoneg = true;
   13314 		/* Autoneg time out should be disabled for SGMII mode */
   13315 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13316 		break;
   13317 	case CTRL_EXT_LINK_MODE_1000KX:
   13318 		pcs_autoneg = false;
   13319 		/* FALLTHROUGH */
   13320 	default:
   13321 		if ((sc->sc_type == WM_T_82575)
   13322 		    || (sc->sc_type == WM_T_82576)) {
   13323 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13324 				pcs_autoneg = false;
   13325 		}
   13326 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13327 		    | CTRL_FRCFDX;
   13328 
   13329 		/* Set speed of 1000/Full if speed/duplex is forced */
   13330 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13331 	}
   13332 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13333 
   13334 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13335 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13336 
   13337 	if (pcs_autoneg) {
   13338 		/* Set PCS register for autoneg */
   13339 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13340 
   13341 		/* Disable force flow control for autoneg */
   13342 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13343 
   13344 		/* Configure flow control advertisement for autoneg */
   13345 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13346 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13347 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13348 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13349 	} else
   13350 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13351 
   13352 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13353 
   13354 	return 0;
   13355 }
   13356 
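/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a SERDES device.
 */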
   13357 static void
   13358 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13359 {
   13360 	struct wm_softc *sc = ifp->if_softc;
   13361 	struct mii_data *mii = &sc->sc_mii;
   13362 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13363 	uint32_t pcs_adv, pcs_lpab, reg;
   13364 
   13365 	ifmr->ifm_status = IFM_AVALID;
   13366 	ifmr->ifm_active = IFM_ETHER;
   13367 
   13368 	/* Check PCS */
   13369 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13370 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13371 		ifmr->ifm_active |= IFM_NONE;
   13372 		sc->sc_tbi_linkup = 0;
   13373 		goto setled;
   13374 	}
   13375 
   13376 	sc->sc_tbi_linkup = 1;
   13377 	ifmr->ifm_status |= IFM_ACTIVE;
   13378 	if (sc->sc_type == WM_T_I354) {
   13379 		uint32_t status;
   13380 
   13381 		status = CSR_READ(sc, WMREG_STATUS);
   13382 		if (((status & STATUS_2P5_SKU) != 0)
   13383 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13384 			ifmr->ifm_active |= IFM_2500_KX;
   13385 		} else
   13386 			ifmr->ifm_active |= IFM_1000_KX;
   13387 	} else {
   13388 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13389 		case PCS_LSTS_SPEED_10:
   13390 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13391 			break;
   13392 		case PCS_LSTS_SPEED_100:
   13393 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13394 			break;
   13395 		case PCS_LSTS_SPEED_1000:
   13396 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13397 			break;
   13398 		default:
   13399 			device_printf(sc->sc_dev, "Unknown speed\n");
   13400 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13401 			break;
   13402 		}
   13403 	}
   13404 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13405 	if ((reg & PCS_LSTS_FDX) != 0)
   13406 		ifmr->ifm_active |= IFM_FDX;
   13407 	else
   13408 		ifmr->ifm_active |= IFM_HDX;
   13409 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13410 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13411 		/* Check flow */
   13412 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13413 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13414 			DPRINTF(sc, WM_DEBUG_LINK,
   13415 			    ("XXX LINKOK but not ACOMP\n"));
   13416 			goto setled;
   13417 		}
   13418 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13419 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13420 		DPRINTF(sc, WM_DEBUG_LINK,
   13421 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13422 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13423 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13424 			mii->mii_media_active |= IFM_FLOW
   13425 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13426 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13427 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13428 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13429 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13430 			mii->mii_media_active |= IFM_FLOW
   13431 			    | IFM_ETH_TXPAUSE;
   13432 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13433 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13434 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13435 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13436 			mii->mii_media_active |= IFM_FLOW
   13437 			    | IFM_ETH_RXPAUSE;
   13438 		}
   13439 	}
   13440 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13441 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13442 setled:
   13443 	wm_tbi_serdes_set_linkled(sc);
   13444 }
   13445 
   13446 /*
   13447  * wm_serdes_tick:
   13448  *
   13449  *	Check the link on serdes devices.
   13450  */
   13451 static void
   13452 wm_serdes_tick(struct wm_softc *sc)
   13453 {
   13454 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13455 	struct mii_data *mii = &sc->sc_mii;
   13456 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13457 	uint32_t reg;
   13458 
   13459 	KASSERT(mutex_owned(sc->sc_core_lock));
   13460 
   13461 	mii->mii_media_status = IFM_AVALID;
   13462 	mii->mii_media_active = IFM_ETHER;
   13463 
   13464 	/* Check PCS */
   13465 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13466 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13467 		mii->mii_media_status |= IFM_ACTIVE;
   13468 		sc->sc_tbi_linkup = 1;
   13469 		sc->sc_tbi_serdes_ticks = 0;
   13470 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13471 		if ((reg & PCS_LSTS_FDX) != 0)
   13472 			mii->mii_media_active |= IFM_FDX;
   13473 		else
   13474 			mii->mii_media_active |= IFM_HDX;
   13475 	} else {
   13476 		mii->mii_media_status |= IFM_NONE;
   13477 		sc->sc_tbi_linkup = 0;
   13478 		/* If the timer expired, retry autonegotiation */
   13479 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13480 		    && (++sc->sc_tbi_serdes_ticks
   13481 			>= sc->sc_tbi_serdes_anegticks)) {
   13482 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13483 				device_xname(sc->sc_dev), __func__));
   13484 			sc->sc_tbi_serdes_ticks = 0;
   13485 			/* XXX */
   13486 			wm_serdes_mediachange(ifp);
   13487 		}
   13488 	}
   13489 
   13490 	wm_tbi_serdes_set_linkled(sc);
   13491 }
   13492 
   13493 /* SFP related */
   13494 
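/*
 * wm_sfp_read_data_byte:
 *
 *	Read a single byte from the SFP module over the I2C interface.
 */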
   13495 static int
   13496 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13497 {
   13498 	uint32_t i2ccmd;
   13499 	int i;
   13500 
   13501 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13502 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13503 
   13504 	/* Poll the ready bit */
   13505 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13506 		delay(50);
   13507 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13508 		if (i2ccmd & I2CCMD_READY)
   13509 			break;
   13510 	}
   13511 	if ((i2ccmd & I2CCMD_READY) == 0)
   13512 		return -1;
   13513 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13514 		return -1;
   13515 
   13516 	*data = i2ccmd & 0x00ff;
   13517 
   13518 	return 0;
   13519 }
   13520 
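/*
 * wm_sfp_get_media_type:
 *
 *	Determine the media type from the SFP module's identifier and
 *	Ethernet compliance flags.
 */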
   13521 static uint32_t
   13522 wm_sfp_get_media_type(struct wm_softc *sc)
   13523 {
   13524 	uint32_t ctrl_ext;
   13525 	uint8_t val = 0;
   13526 	int timeout = 3;
   13527 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13528 	int rv = -1;
   13529 
   13530 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13531 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13532 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13533 	CSR_WRITE_FLUSH(sc);
   13534 
   13535 	/* Read SFP module data */
   13536 	while (timeout) {
   13537 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13538 		if (rv == 0)
   13539 			break;
   13540 		delay(100*1000); /* XXX too big */
   13541 		timeout--;
   13542 	}
   13543 	if (rv != 0)
   13544 		goto out;
   13545 
   13546 	switch (val) {
   13547 	case SFF_SFP_ID_SFF:
   13548 		aprint_normal_dev(sc->sc_dev,
   13549 		    "Module/Connector soldered to board\n");
   13550 		break;
   13551 	case SFF_SFP_ID_SFP:
   13552 		sc->sc_flags |= WM_F_SFP;
   13553 		break;
   13554 	case SFF_SFP_ID_UNKNOWN:
   13555 		goto out;
   13556 	default:
   13557 		break;
   13558 	}
   13559 
   13560 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13561 	if (rv != 0)
   13562 		goto out;
   13563 
   13564 	sc->sc_sfptype = val;
   13565 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13566 		mediatype = WM_MEDIATYPE_SERDES;
   13567 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13568 		sc->sc_flags |= WM_F_SGMII;
   13569 		mediatype = WM_MEDIATYPE_COPPER;
   13570 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13571 		sc->sc_flags |= WM_F_SGMII;
   13572 		mediatype = WM_MEDIATYPE_SERDES;
   13573 	} else {
   13574 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13575 		    __func__, sc->sc_sfptype);
   13576 		sc->sc_sfptype = 0; /* XXX unknown */
   13577 	}
   13578 
   13579 out:
   13580 	/* Restore I2C interface setting */
   13581 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13582 
   13583 	return mediatype;
   13584 }
   13585 
   13586 /*
   13587  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   13589  */
   13590 
   13591 /* Both spi and uwire */
   13592 
   13593 /*
   13594  * wm_eeprom_sendbits:
   13595  *
   13596  *	Send a series of bits to the EEPROM.
   13597  */
   13598 static void
   13599 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13600 {
   13601 	uint32_t reg;
   13602 	int x;
   13603 
   13604 	reg = CSR_READ(sc, WMREG_EECD);
   13605 
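	/*
	 * Shift the bits out MSB first on DI, clocking SK high and then
	 * low while the data line is held stable.
	 */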
   13606 	for (x = nbits; x > 0; x--) {
   13607 		if (bits & (1U << (x - 1)))
   13608 			reg |= EECD_DI;
   13609 		else
   13610 			reg &= ~EECD_DI;
   13611 		CSR_WRITE(sc, WMREG_EECD, reg);
   13612 		CSR_WRITE_FLUSH(sc);
   13613 		delay(2);
   13614 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13615 		CSR_WRITE_FLUSH(sc);
   13616 		delay(2);
   13617 		CSR_WRITE(sc, WMREG_EECD, reg);
   13618 		CSR_WRITE_FLUSH(sc);
   13619 		delay(2);
   13620 	}
   13621 }
   13622 
   13623 /*
   13624  * wm_eeprom_recvbits:
   13625  *
   13626  *	Receive a series of bits from the EEPROM.
   13627  */
   13628 static void
   13629 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13630 {
   13631 	uint32_t reg, val;
   13632 	int x;
   13633 
   13634 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13635 
   13636 	val = 0;
   13637 	for (x = nbits; x > 0; x--) {
   13638 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13639 		CSR_WRITE_FLUSH(sc);
   13640 		delay(2);
   13641 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13642 			val |= (1U << (x - 1));
   13643 		CSR_WRITE(sc, WMREG_EECD, reg);
   13644 		CSR_WRITE_FLUSH(sc);
   13645 		delay(2);
   13646 	}
   13647 	*valp = val;
   13648 }
   13649 
   13650 /* Microwire */
   13651 
   13652 /*
   13653  * wm_nvm_read_uwire:
   13654  *
   13655  *	Read a word from the EEPROM using the MicroWire protocol.
   13656  */
   13657 static int
   13658 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13659 {
   13660 	uint32_t reg, val;
   13661 	int i, rv;
   13662 
   13663 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13664 		device_xname(sc->sc_dev), __func__));
   13665 
   13666 	rv = sc->nvm.acquire(sc);
   13667 	if (rv != 0)
   13668 		return rv;
   13669 
   13670 	for (i = 0; i < wordcnt; i++) {
   13671 		/* Clear SK and DI. */
   13672 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13673 		CSR_WRITE(sc, WMREG_EECD, reg);
   13674 
   13675 		/*
   13676 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13677 		 * and Xen.
   13678 		 *
   13679 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   13681 		 */
   13682 		if (sc->sc_type == WM_T_82540) {
   13683 			reg |= EECD_SK;
   13684 			CSR_WRITE(sc, WMREG_EECD, reg);
   13685 			reg &= ~EECD_SK;
   13686 			CSR_WRITE(sc, WMREG_EECD, reg);
   13687 			CSR_WRITE_FLUSH(sc);
   13688 			delay(2);
   13689 		}
   13690 		/* XXX: end of workaround */
   13691 
   13692 		/* Set CHIP SELECT. */
   13693 		reg |= EECD_CS;
   13694 		CSR_WRITE(sc, WMREG_EECD, reg);
   13695 		CSR_WRITE_FLUSH(sc);
   13696 		delay(2);
   13697 
   13698 		/* Shift in the READ command. */
   13699 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13700 
   13701 		/* Shift in address. */
   13702 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13703 
   13704 		/* Shift out the data. */
   13705 		wm_eeprom_recvbits(sc, &val, 16);
   13706 		data[i] = val & 0xffff;
   13707 
   13708 		/* Clear CHIP SELECT. */
   13709 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13710 		CSR_WRITE(sc, WMREG_EECD, reg);
   13711 		CSR_WRITE_FLUSH(sc);
   13712 		delay(2);
   13713 	}
   13714 
   13715 	sc->nvm.release(sc);
   13716 	return 0;
   13717 }
   13718 
   13719 /* SPI */
   13720 
   13721 /*
   13722  * Set SPI and FLASH related information from the EECD register.
   13723  * For 82541 and 82547, the word size is taken from EEPROM.
   13724  */
   13725 static int
   13726 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13727 {
   13728 	int size;
   13729 	uint32_t reg;
   13730 	uint16_t data;
   13731 
   13732 	reg = CSR_READ(sc, WMREG_EECD);
   13733 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13734 
   13735 	/* Read the size of NVM from EECD by default */
   13736 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13737 	switch (sc->sc_type) {
   13738 	case WM_T_82541:
   13739 	case WM_T_82541_2:
   13740 	case WM_T_82547:
   13741 	case WM_T_82547_2:
   13742 		/* Set dummy value to access EEPROM */
   13743 		sc->sc_nvm_wordsize = 64;
   13744 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13745 			aprint_error_dev(sc->sc_dev,
   13746 			    "%s: failed to read EEPROM size\n", __func__);
   13747 		}
   13748 		reg = data;
   13749 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13750 		if (size == 0)
   13751 			size = 6; /* 64 word size */
   13752 		else
   13753 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13754 		break;
   13755 	case WM_T_80003:
   13756 	case WM_T_82571:
   13757 	case WM_T_82572:
   13758 	case WM_T_82573: /* SPI case */
   13759 	case WM_T_82574: /* SPI case */
   13760 	case WM_T_82583: /* SPI case */
   13761 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13762 		if (size > 14)
   13763 			size = 14;
   13764 		break;
   13765 	case WM_T_82575:
   13766 	case WM_T_82576:
   13767 	case WM_T_82580:
   13768 	case WM_T_I350:
   13769 	case WM_T_I354:
   13770 	case WM_T_I210:
   13771 	case WM_T_I211:
   13772 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13773 		if (size > 15)
   13774 			size = 15;
   13775 		break;
   13776 	default:
   13777 		aprint_error_dev(sc->sc_dev,
   13778 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13779 		return -1;
   13781 	}
   13782 
   13783 	sc->sc_nvm_wordsize = 1 << size;
   13784 
   13785 	return 0;
   13786 }
   13787 
   13788 /*
   13789  * wm_nvm_ready_spi:
   13790  *
   13791  *	Wait for a SPI EEPROM to be ready for commands.
   13792  */
   13793 static int
   13794 wm_nvm_ready_spi(struct wm_softc *sc)
   13795 {
   13796 	uint32_t val;
   13797 	int usec;
   13798 
   13799 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13800 		device_xname(sc->sc_dev), __func__));
   13801 
   13802 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13803 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13804 		wm_eeprom_recvbits(sc, &val, 8);
   13805 		if ((val & SPI_SR_RDY) == 0)
   13806 			break;
   13807 	}
   13808 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   13810 		return -1;
   13811 	}
   13812 	return 0;
   13813 }
   13814 
   13815 /*
   13816  * wm_nvm_read_spi:
   13817  *
 *	Read a word from the EEPROM using the SPI protocol.
   13819  */
   13820 static int
   13821 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13822 {
   13823 	uint32_t reg, val;
   13824 	int i;
   13825 	uint8_t opc;
   13826 	int rv;
   13827 
   13828 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13829 		device_xname(sc->sc_dev), __func__));
   13830 
   13831 	rv = sc->nvm.acquire(sc);
   13832 	if (rv != 0)
   13833 		return rv;
   13834 
   13835 	/* Clear SK and CS. */
   13836 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13837 	CSR_WRITE(sc, WMREG_EECD, reg);
   13838 	CSR_WRITE_FLUSH(sc);
   13839 	delay(2);
   13840 
   13841 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13842 		goto out;
   13843 
   13844 	/* Toggle CS to flush commands. */
   13845 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13846 	CSR_WRITE_FLUSH(sc);
   13847 	delay(2);
   13848 	CSR_WRITE(sc, WMREG_EECD, reg);
   13849 	CSR_WRITE_FLUSH(sc);
   13850 	delay(2);
   13851 
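	/*
	 * Parts with 8 address bits carry the ninth address bit in the
	 * A8 bit of the READ opcode; the address sent below is a byte
	 * address, hence word << 1.
	 */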
   13852 	opc = SPI_OPC_READ;
   13853 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13854 		opc |= SPI_OPC_A8;
   13855 
   13856 	wm_eeprom_sendbits(sc, opc, 8);
   13857 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13858 
   13859 	for (i = 0; i < wordcnt; i++) {
   13860 		wm_eeprom_recvbits(sc, &val, 16);
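		/* The data comes back byte-swapped; restore host word order. */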
   13861 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13862 	}
   13863 
   13864 	/* Raise CS and clear SK. */
   13865 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13866 	CSR_WRITE(sc, WMREG_EECD, reg);
   13867 	CSR_WRITE_FLUSH(sc);
   13868 	delay(2);
   13869 
   13870 out:
   13871 	sc->nvm.release(sc);
   13872 	return rv;
   13873 }
   13874 
/* Reading with EERD */
   13876 
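/*
 * wm_poll_eerd_eewr_done:
 *
 *	Wait for the EERD/EEWR DONE bit to be set.
 */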
   13877 static int
   13878 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13879 {
   13880 	uint32_t attempts = 100000;
   13881 	uint32_t i, reg = 0;
   13882 	int32_t done = -1;
   13883 
   13884 	for (i = 0; i < attempts; i++) {
   13885 		reg = CSR_READ(sc, rw);
   13886 
   13887 		if (reg & EERD_DONE) {
   13888 			done = 0;
   13889 			break;
   13890 		}
   13891 		delay(5);
   13892 	}
   13893 
   13894 	return done;
   13895 }
   13896 
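/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM using the EERD register.
 */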
   13897 static int
   13898 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13899 {
   13900 	int i, eerd = 0;
   13901 	int rv;
   13902 
   13903 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13904 		device_xname(sc->sc_dev), __func__));
   13905 
   13906 	rv = sc->nvm.acquire(sc);
   13907 	if (rv != 0)
   13908 		return rv;
   13909 
   13910 	for (i = 0; i < wordcnt; i++) {
   13911 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13912 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13913 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13914 		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13917 			break;
   13918 		}
   13919 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13920 	}
   13921 
   13922 	sc->nvm.release(sc);
   13923 	return rv;
   13924 }
   13925 
   13926 /* Flash */
   13927 
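/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds a valid NVM signature.
 */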
   13928 static int
   13929 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13930 {
   13931 	uint32_t eecd;
   13932 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13933 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13934 	uint32_t nvm_dword = 0;
   13935 	uint8_t sig_byte = 0;
   13936 	int rv;
   13937 
   13938 	switch (sc->sc_type) {
   13939 	case WM_T_PCH_SPT:
   13940 	case WM_T_PCH_CNP:
   13941 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13942 		act_offset = ICH_NVM_SIG_WORD * 2;
   13943 
   13944 		/* Set bank to 0 in case flash read fails. */
   13945 		*bank = 0;
   13946 
   13947 		/* Check bank 0 */
   13948 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13949 		if (rv != 0)
   13950 			return rv;
   13951 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13952 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13953 			*bank = 0;
   13954 			return 0;
   13955 		}
   13956 
   13957 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13961 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13962 			*bank = 1;
   13963 			return 0;
   13964 		}
   13965 		aprint_error_dev(sc->sc_dev,
   13966 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13967 		return -1;
   13968 	case WM_T_ICH8:
   13969 	case WM_T_ICH9:
   13970 		eecd = CSR_READ(sc, WMREG_EECD);
   13971 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13972 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13973 			return 0;
   13974 		}
   13975 		/* FALLTHROUGH */
   13976 	default:
   13977 		/* Default to 0 */
   13978 		*bank = 0;
   13979 
   13980 		/* Check bank 0 */
   13981 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13982 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13983 			*bank = 0;
   13984 			return 0;
   13985 		}
   13986 
   13987 		/* Check bank 1 */
   13988 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13989 		    &sig_byte);
   13990 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13991 			*bank = 1;
   13992 			return 0;
   13993 		}
   13994 	}
   13995 
   13996 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13997 		device_xname(sc->sc_dev)));
   13998 	return -1;
   13999 }
   14000 
   14001 /******************************************************************************
   14002  * This function does initial flash setup so that a new read/write/erase cycle
   14003  * can be started.
   14004  *
   14005  * sc - The pointer to the hw structure
   14006  ****************************************************************************/
   14007 static int32_t
   14008 wm_ich8_cycle_init(struct wm_softc *sc)
   14009 {
   14010 	uint16_t hsfsts;
   14011 	int32_t error = 1;
   14012 	int32_t i     = 0;
   14013 
   14014 	if (sc->sc_type >= WM_T_PCH_SPT)
   14015 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14016 	else
   14017 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14018 
	/* Check the Flash Descriptor Valid bit in Hw status */
   14020 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14021 		return error;
   14022 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   14025 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14026 
   14027 	if (sc->sc_type >= WM_T_PCH_SPT)
   14028 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14029 	else
   14030 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14031 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used to tell whether a cycle is in
	 * progress or has been completed. We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that accesses by two threads are serialized, or some way to
	 * keep two threads from starting a cycle at the same time.
	 */
   14042 
   14043 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14044 		/*
   14045 		 * There is no cycle running at present, so we can start a
   14046 		 * cycle
   14047 		 */
   14048 
   14049 		/* Begin by setting Flash Cycle Done. */
   14050 		hsfsts |= HSFSTS_DONE;
   14051 		if (sc->sc_type >= WM_T_PCH_SPT)
   14052 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14053 			    hsfsts & 0xffffUL);
   14054 		else
   14055 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14056 		error = 0;
   14057 	} else {
   14058 		/*
		 * Otherwise poll for some time so the current cycle has a
   14060 		 * chance to end before giving up.
   14061 		 */
   14062 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14063 			if (sc->sc_type >= WM_T_PCH_SPT)
   14064 				hsfsts = ICH8_FLASH_READ32(sc,
   14065 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14066 			else
   14067 				hsfsts = ICH8_FLASH_READ16(sc,
   14068 				    ICH_FLASH_HSFSTS);
   14069 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14070 				error = 0;
   14071 				break;
   14072 			}
   14073 			delay(1);
   14074 		}
   14075 		if (error == 0) {
			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done bit.
			 */
   14080 			hsfsts |= HSFSTS_DONE;
   14081 			if (sc->sc_type >= WM_T_PCH_SPT)
   14082 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14083 				    hsfsts & 0xffffUL);
   14084 			else
   14085 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14086 				    hsfsts);
   14087 		}
   14088 	}
   14089 	return error;
   14090 }
   14091 
   14092 /******************************************************************************
   14093  * This function starts a flash cycle and waits for its completion
   14094  *
   14095  * sc - The pointer to the hw structure
   14096  ****************************************************************************/
   14097 static int32_t
   14098 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14099 {
   14100 	uint16_t hsflctl;
   14101 	uint16_t hsfsts;
   14102 	int32_t error = 1;
   14103 	uint32_t i = 0;
   14104 
   14105 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14106 	if (sc->sc_type >= WM_T_PCH_SPT)
   14107 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14108 	else
   14109 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14110 	hsflctl |= HSFCTL_GO;
   14111 	if (sc->sc_type >= WM_T_PCH_SPT)
   14112 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14113 		    (uint32_t)hsflctl << 16);
   14114 	else
   14115 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14116 
   14117 	/* Wait till FDONE bit is set to 1 */
   14118 	do {
   14119 		if (sc->sc_type >= WM_T_PCH_SPT)
   14120 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14121 			    & 0xffffUL;
   14122 		else
   14123 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14124 		if (hsfsts & HSFSTS_DONE)
   14125 			break;
   14126 		delay(1);
   14127 		i++;
   14128 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14130 		error = 0;
   14131 
   14132 	return error;
   14133 }
   14134 
   14135 /******************************************************************************
   14136  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14137  *
   14138  * sc - The pointer to the hw structure
   14139  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   14141  * data - Pointer to the word to store the value read.
   14142  *****************************************************************************/
   14143 static int32_t
   14144 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14145     uint32_t size, uint32_t *data)
   14146 {
   14147 	uint16_t hsfsts;
   14148 	uint16_t hsflctl;
   14149 	uint32_t flash_linear_address;
   14150 	uint32_t flash_data = 0;
   14151 	int32_t error = 1;
   14152 	int32_t count = 0;
   14153 
	if (size < 1 || size > 4 || data == NULL ||
   14155 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14156 		return error;
   14157 
   14158 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14159 	    sc->sc_ich8_flash_base;
   14160 
   14161 	do {
   14162 		delay(1);
   14163 		/* Steps */
   14164 		error = wm_ich8_cycle_init(sc);
   14165 		if (error)
   14166 			break;
   14167 
   14168 		if (sc->sc_type >= WM_T_PCH_SPT)
   14169 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14170 			    >> 16;
   14171 		else
   14172 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0=1 byte, 3=4 bytes). */
   14174 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14175 		    & HSFCTL_BCOUNT_MASK;
   14176 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14177 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14178 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash. Therefore, only 32-bit access is supported.
   14181 			 */
   14182 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14183 			    (uint32_t)hsflctl << 16);
   14184 		} else
   14185 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14186 
   14187 		/*
   14188 		 * Write the last 24 bits of index into Flash Linear address
   14189 		 * field in Flash Address
   14190 		 */
   14191 		/* TODO: TBD maybe check the index against the size of flash */
   14192 
   14193 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14194 
   14195 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14196 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the value out of the
		 * Flash Data0 register, least significant byte first.
		 */
   14203 		if (error == 0) {
   14204 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14205 			if (size == 1)
   14206 				*data = (uint8_t)(flash_data & 0x000000FF);
   14207 			else if (size == 2)
   14208 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14209 			else if (size == 4)
   14210 				*data = (uint32_t)flash_data;
   14211 			break;
   14212 		} else {
   14213 			/*
    14214 			 * If we've gotten here, things are probably
    14215 			 * completely hosed, but if the error condition is
    14216 			 * detected, it won't hurt to retry up to
    14217 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14218 			 */
   14219 			if (sc->sc_type >= WM_T_PCH_SPT)
   14220 				hsfsts = ICH8_FLASH_READ32(sc,
   14221 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14222 			else
   14223 				hsfsts = ICH8_FLASH_READ16(sc,
   14224 				    ICH_FLASH_HSFSTS);
   14225 
   14226 			if (hsfsts & HSFSTS_ERR) {
   14227 				/* Repeat for some time before giving up. */
   14228 				continue;
   14229 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14230 				break;
   14231 		}
   14232 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14233 
   14234 	return error;
   14235 }
   14236 
   14237 /******************************************************************************
   14238  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14239  *
   14240  * sc - pointer to wm_hw structure
   14241  * index - The index of the byte to read.
   14242  * data - Pointer to a byte to store the value read.
   14243  *****************************************************************************/
   14244 static int32_t
   14245 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14246 {
   14247 	int32_t status;
   14248 	uint32_t word = 0;
   14249 
   14250 	status = wm_read_ich8_data(sc, index, 1, &word);
   14251 	if (status == 0)
   14252 		*data = (uint8_t)word;
   14253 	else
   14254 		*data = 0;
   14255 
   14256 	return status;
   14257 }
   14258 
   14259 /******************************************************************************
   14260  * Reads a word from the NVM using the ICH8 flash access registers.
   14261  *
   14262  * sc - pointer to wm_hw structure
   14263  * index - The starting byte index of the word to read.
   14264  * data - Pointer to a word to store the value read.
   14265  *****************************************************************************/
   14266 static int32_t
   14267 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14268 {
   14269 	int32_t status;
   14270 	uint32_t word = 0;
   14271 
   14272 	status = wm_read_ich8_data(sc, index, 2, &word);
   14273 	if (status == 0)
   14274 		*data = (uint16_t)word;
   14275 	else
   14276 		*data = 0;
   14277 
   14278 	return status;
   14279 }
   14280 
   14281 /******************************************************************************
   14282  * Reads a dword from the NVM using the ICH8 flash access registers.
   14283  *
   14284  * sc - pointer to wm_hw structure
    14285  * index - The starting byte index of the dword to read.
    14286  * data - Pointer to a dword to store the value read.
   14287  *****************************************************************************/
   14288 static int32_t
   14289 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14290 {
   14291 	int32_t status;
   14292 
   14293 	status = wm_read_ich8_data(sc, index, 4, data);
   14294 	return status;
   14295 }
   14296 
   14297 /******************************************************************************
   14298  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14299  * register.
   14300  *
   14301  * sc - Struct containing variables accessed by shared code
   14302  * offset - offset of word in the EEPROM to read
   14303  * data - word read from the EEPROM
   14304  * words - number of words to read
   14305  *****************************************************************************/
   14306 static int
   14307 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14308 {
   14309 	int rv;
   14310 	uint32_t flash_bank = 0;
   14311 	uint32_t act_offset = 0;
   14312 	uint32_t bank_offset = 0;
   14313 	uint16_t word = 0;
   14314 	uint16_t i = 0;
   14315 
   14316 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14317 		device_xname(sc->sc_dev), __func__));
   14318 
   14319 	rv = sc->nvm.acquire(sc);
   14320 	if (rv != 0)
   14321 		return rv;
   14322 
   14323 	/*
   14324 	 * We need to know which is the valid flash bank.  In the event
   14325 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14326 	 * managing flash_bank. So it cannot be trusted and needs
   14327 	 * to be updated with each read.
   14328 	 */
   14329 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14330 	if (rv) {
   14331 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14332 			device_xname(sc->sc_dev)));
   14333 		flash_bank = 0;
   14334 	}
   14335 
   14336 	/*
   14337 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14338 	 * size
   14339 	 */
   14340 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14341 
   14342 	for (i = 0; i < words; i++) {
   14343 		/* The NVM part needs a byte offset, hence * 2 */
   14344 		act_offset = bank_offset + ((offset + i) * 2);
   14345 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14346 		if (rv) {
   14347 			aprint_error_dev(sc->sc_dev,
   14348 			    "%s: failed to read NVM\n", __func__);
   14349 			break;
   14350 		}
   14351 		data[i] = word;
   14352 	}
   14353 
   14354 	sc->nvm.release(sc);
   14355 	return rv;
   14356 }
   14357 
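/*
 * A worked example of the bank arithmetic above with illustrative
 * numbers (not taken from any particular part), never compiled: with a
 * flash bank size of 0x800 words, bank 1 starts at byte offset
 * 0x800 * 2 = 0x1000, so NVM word 0x03 in bank 1 lives at byte offset
 * 0x1000 + 0x03 * 2 = 0x1006.
 */
#if 0
static uint32_t
wm_example_ich8_act_offset(void)
{
	uint32_t bank_offset, act_offset;

	bank_offset = 1 * (0x800 * 2);		/* bank 1 -> 0x1000 */
	act_offset = bank_offset + (0x03 * 2);	/* word 3 -> 0x1006 */
	return act_offset;
}
#endif
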
   14358 /******************************************************************************
   14359  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14360  * register.
   14361  *
   14362  * sc - Struct containing variables accessed by shared code
   14363  * offset - offset of word in the EEPROM to read
   14364  * data - word read from the EEPROM
   14365  * words - number of words to read
   14366  *****************************************************************************/
   14367 static int
   14368 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14369 {
   14370 	int	 rv;
   14371 	uint32_t flash_bank = 0;
   14372 	uint32_t act_offset = 0;
   14373 	uint32_t bank_offset = 0;
   14374 	uint32_t dword = 0;
   14375 	uint16_t i = 0;
   14376 
   14377 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14378 		device_xname(sc->sc_dev), __func__));
   14379 
   14380 	rv = sc->nvm.acquire(sc);
   14381 	if (rv != 0)
   14382 		return rv;
   14383 
   14384 	/*
   14385 	 * We need to know which is the valid flash bank.  In the event
   14386 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14387 	 * managing flash_bank. So it cannot be trusted and needs
   14388 	 * to be updated with each read.
   14389 	 */
   14390 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14391 	if (rv) {
   14392 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14393 			device_xname(sc->sc_dev)));
   14394 		flash_bank = 0;
   14395 	}
   14396 
   14397 	/*
   14398 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14399 	 * size
   14400 	 */
   14401 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14402 
   14403 	for (i = 0; i < words; i++) {
   14404 		/* The NVM part needs a byte offset, hence * 2 */
   14405 		act_offset = bank_offset + ((offset + i) * 2);
   14406 		/* but we must read dword aligned, so mask ... */
   14407 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14408 		if (rv) {
   14409 			aprint_error_dev(sc->sc_dev,
   14410 			    "%s: failed to read NVM\n", __func__);
   14411 			break;
   14412 		}
   14413 		/* ... and pick out low or high word */
   14414 		if ((act_offset & 0x2) == 0)
   14415 			data[i] = (uint16_t)(dword & 0xFFFF);
   14416 		else
   14417 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14418 	}
   14419 
   14420 	sc->nvm.release(sc);
   14421 	return rv;
   14422 }
   14423 
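/*
 * A minimal sketch of the aligned-read trick used above, never
 * compiled, with a hypothetical byte offset of 0x1006: the offset is
 * masked down to the enclosing dword, and bit 1 of the original offset
 * selects the low or high 16-bit half.
 */
#if 0
static uint16_t
wm_example_read_spt_word(struct wm_softc *sc)
{
	uint32_t dword = 0;

	(void)wm_read_ich8_dword(sc, 0x1006 & ~0x3, &dword);
	return ((0x1006 & 0x2) == 0) ?
	    (uint16_t)(dword & 0xffff) : (uint16_t)(dword >> 16);
}
#endif
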
   14424 /* iNVM */
   14425 
   14426 static int
   14427 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14428 {
   14429 	int32_t	 rv = 0;
   14430 	uint32_t invm_dword;
   14431 	uint16_t i;
   14432 	uint8_t record_type, word_address;
   14433 
   14434 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14435 		device_xname(sc->sc_dev), __func__));
   14436 
   14437 	for (i = 0; i < INVM_SIZE; i++) {
   14438 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14439 		/* Get record type */
   14440 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14441 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14442 			break;
   14443 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14444 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14445 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14446 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14447 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14448 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14449 			if (word_address == address) {
   14450 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14451 				rv = 0;
   14452 				break;
   14453 			}
   14454 		}
   14455 	}
   14456 
   14457 	return rv;
   14458 }
   14459 
   14460 static int
   14461 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14462 {
   14463 	int i, rv;
   14464 
   14465 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14466 		device_xname(sc->sc_dev), __func__));
   14467 
   14468 	rv = sc->nvm.acquire(sc);
   14469 	if (rv != 0)
   14470 		return rv;
   14471 
   14472 	for (i = 0; i < words; i++) {
   14473 		switch (offset + i) {
   14474 		case NVM_OFF_MACADDR:
   14475 		case NVM_OFF_MACADDR1:
   14476 		case NVM_OFF_MACADDR2:
   14477 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14478 			if (rv != 0) {
   14479 				data[i] = 0xffff;
   14480 				rv = -1;
   14481 			}
   14482 			break;
    14483 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
    14484 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14485 			if (rv != 0) {
    14486 				data[i] = INVM_DEFAULT_AL;
    14487 				rv = 0;
    14488 			}
    14489 			break;
    14490 		case NVM_OFF_CFG2:
    14491 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14492 			if (rv != 0) {
    14493 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    14494 				rv = 0;
    14495 			}
    14496 			break;
    14497 		case NVM_OFF_CFG4:
    14498 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14499 			if (rv != 0) {
    14500 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    14501 				rv = 0;
    14502 			}
    14503 			break;
    14504 		case NVM_OFF_LED_1_CFG:
    14505 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14506 			if (rv != 0) {
    14507 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    14508 				rv = 0;
    14509 			}
    14510 			break;
    14511 		case NVM_OFF_LED_0_2_CFG:
    14512 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14513 			if (rv != 0) {
    14514 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    14515 				rv = 0;
    14516 			}
    14517 			break;
    14518 		case NVM_OFF_ID_LED_SETTINGS:
    14519 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    14520 			if (rv != 0) {
    14521 				data[i] = ID_LED_RESERVED_FFFF;
    14522 				rv = 0;
    14523 			}
    14524 			break;
    14525 		default:
    14526 			DPRINTF(sc, WM_DEBUG_NVM,
    14527 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    14528 			data[i] = NVM_RESERVED_WORD;
    14529 			break;
   14530 		}
   14531 	}
   14532 
   14533 	sc->nvm.release(sc);
   14534 	return rv;
   14535 }
   14536 
   14537 /* Lock, detecting NVM type, validate checksum, version and read */
   14538 
   14539 static int
   14540 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14541 {
   14542 	uint32_t eecd = 0;
   14543 
   14544 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14545 	    || sc->sc_type == WM_T_82583) {
   14546 		eecd = CSR_READ(sc, WMREG_EECD);
   14547 
   14548 		/* Isolate bits 15 & 16 */
   14549 		eecd = ((eecd >> 15) & 0x03);
   14550 
   14551 		/* If both bits are set, device is Flash type */
   14552 		if (eecd == 0x03)
   14553 			return 0;
   14554 	}
   14555 	return 1;
   14556 }
   14557 
   14558 static int
   14559 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14560 {
   14561 	uint32_t eec;
   14562 
   14563 	eec = CSR_READ(sc, WMREG_EEC);
   14564 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14565 		return 1;
   14566 
   14567 	return 0;
   14568 }
   14569 
   14570 /*
   14571  * wm_nvm_validate_checksum
   14572  *
   14573  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14574  */
   14575 static int
   14576 wm_nvm_validate_checksum(struct wm_softc *sc)
   14577 {
   14578 	uint16_t checksum;
   14579 	uint16_t eeprom_data;
   14580 #ifdef WM_DEBUG
   14581 	uint16_t csum_wordaddr, valid_checksum;
   14582 #endif
   14583 	int i;
   14584 
   14585 	checksum = 0;
   14586 
   14587 	/* Don't check for I211 */
   14588 	if (sc->sc_type == WM_T_I211)
   14589 		return 0;
   14590 
   14591 #ifdef WM_DEBUG
   14592 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14593 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14594 		csum_wordaddr = NVM_OFF_COMPAT;
   14595 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14596 	} else {
   14597 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14598 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14599 	}
   14600 
   14601 	/* Dump EEPROM image for debug */
   14602 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14603 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14604 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14605 		/* XXX PCH_SPT? */
   14606 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14607 		if ((eeprom_data & valid_checksum) == 0)
   14608 			DPRINTF(sc, WM_DEBUG_NVM,
   14609 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14610 				device_xname(sc->sc_dev), eeprom_data,
   14611 				valid_checksum));
   14612 	}
   14613 
   14614 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14615 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14616 		for (i = 0; i < NVM_SIZE; i++) {
   14617 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14618 				printf("XXXX ");
   14619 			else
   14620 				printf("%04hx ", eeprom_data);
   14621 			if (i % 8 == 7)
   14622 				printf("\n");
   14623 		}
   14624 	}
   14625 
   14626 #endif /* WM_DEBUG */
   14627 
   14628 	for (i = 0; i < NVM_SIZE; i++) {
   14629 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14630 			return -1;
   14631 		checksum += eeprom_data;
   14632 	}
   14633 
   14634 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14635 #ifdef WM_DEBUG
   14636 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14637 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14638 #endif
   14639 	}
   14640 
   14641 	return 0;
   14642 }
   14643 
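/*
 * A minimal sketch of the checksum convention validated above, never
 * compiled: the 16-bit sum of all NVM_SIZE words, including the
 * checksum word itself, must equal NVM_CHECKSUM.  Assuming (as on
 * Intel parts) that the checksum word is the last of those words, a
 * tool regenerating it could compute the value like this; this driver
 * itself never writes it here.
 */
#if 0
static uint16_t
wm_example_nvm_checksum(struct wm_softc *sc)
{
	uint16_t sum = 0, word;
	int off;

	for (off = 0; off < NVM_SIZE - 1; off++) {
		if (wm_nvm_read(sc, off, 1, &word) == 0)
			sum += word;
	}
	/* Value to store in the last word so the total comes out right. */
	return (uint16_t)(NVM_CHECKSUM - sum);
}
#endif
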
   14644 static void
   14645 wm_nvm_version_invm(struct wm_softc *sc)
   14646 {
   14647 	uint32_t dword;
   14648 
   14649 	/*
    14650 	 * Linux's code to decode the version is very strange, so we don't
    14651 	 * follow that algorithm and just use word 61 as the document
    14652 	 * describes.  It may not be perfect, though...
   14653 	 *
   14654 	 * Example:
   14655 	 *
   14656 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14657 	 */
   14658 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14659 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14660 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14661 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14662 }
   14663 
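/*
 * A worked example for the decode above, never compiled, using the
 * sample from the comment: INVM word 61 holding 0x00800030 decodes to
 * version 0.6 with the same masks.
 */
#if 0
static void
wm_example_invm_version(struct wm_softc *sc)
{
	uint32_t dword, ver;

	dword = 0x00800030;	/* sample value from the comment above */
	ver = __SHIFTOUT(dword, INVM_VER_1);
	sc->sc_nvm_ver_major = __SHIFTOUT(ver, INVM_MAJOR);	/* 0 */
	sc->sc_nvm_ver_minor = __SHIFTOUT(ver, INVM_MINOR);	/* 6 */
}
#endif
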
   14664 static void
   14665 wm_nvm_version(struct wm_softc *sc)
   14666 {
   14667 	uint16_t major, minor, build, patch;
   14668 	uint16_t uid0, uid1;
   14669 	uint16_t nvm_data;
   14670 	uint16_t off;
   14671 	bool check_version = false;
   14672 	bool check_optionrom = false;
   14673 	bool have_build = false;
   14674 	bool have_uid = true;
   14675 
   14676 	/*
   14677 	 * Version format:
   14678 	 *
   14679 	 * XYYZ
   14680 	 * X0YZ
   14681 	 * X0YY
   14682 	 *
   14683 	 * Example:
   14684 	 *
   14685 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14686 	 *	82571	0x50a6	5.10.6?
   14687 	 *	82572	0x506a	5.6.10?
   14688 	 *	82572EI	0x5069	5.6.9?
   14689 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14690 	 *		0x2013	2.1.3?
   14691 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14692 	 * ICH8+82567	0x0040	0.4.0?
   14693 	 * ICH9+82566	0x1040	1.4.0?
   14694 	 *ICH10+82567	0x0043	0.4.3?
   14695 	 *  PCH+82577	0x00c1	0.12.1?
   14696 	 * PCH2+82579	0x00d3	0.13.3?
   14697 	 *		0x00d4	0.13.4?
   14698 	 *  LPT+I218	0x0023	0.2.3?
   14699 	 *  SPT+I219	0x0084	0.8.4?
   14700 	 *  CNP+I219	0x0054	0.5.4?
   14701 	 */
   14702 
   14703 	/*
   14704 	 * XXX
   14705 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    14706 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14707 	 */
   14708 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14709 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14710 		have_uid = false;
   14711 
   14712 	switch (sc->sc_type) {
   14713 	case WM_T_82571:
   14714 	case WM_T_82572:
   14715 	case WM_T_82574:
   14716 	case WM_T_82583:
   14717 		check_version = true;
   14718 		check_optionrom = true;
   14719 		have_build = true;
   14720 		break;
   14721 	case WM_T_ICH8:
   14722 	case WM_T_ICH9:
   14723 	case WM_T_ICH10:
   14724 	case WM_T_PCH:
   14725 	case WM_T_PCH2:
   14726 	case WM_T_PCH_LPT:
   14727 	case WM_T_PCH_SPT:
   14728 	case WM_T_PCH_CNP:
   14729 		check_version = true;
   14730 		have_build = true;
   14731 		have_uid = false;
   14732 		break;
   14733 	case WM_T_82575:
   14734 	case WM_T_82576:
   14735 	case WM_T_82580:
   14736 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14737 			check_version = true;
   14738 		break;
   14739 	case WM_T_I211:
   14740 		wm_nvm_version_invm(sc);
   14741 		have_uid = false;
   14742 		goto printver;
   14743 	case WM_T_I210:
   14744 		if (!wm_nvm_flash_presence_i210(sc)) {
   14745 			wm_nvm_version_invm(sc);
   14746 			have_uid = false;
   14747 			goto printver;
   14748 		}
   14749 		/* FALLTHROUGH */
   14750 	case WM_T_I350:
   14751 	case WM_T_I354:
   14752 		check_version = true;
   14753 		check_optionrom = true;
   14754 		break;
   14755 	default:
   14756 		return;
   14757 	}
   14758 	if (check_version
   14759 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14760 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14761 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14762 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14763 			build = nvm_data & NVM_BUILD_MASK;
   14764 			have_build = true;
   14765 		} else
   14766 			minor = nvm_data & 0x00ff;
   14767 
    14768 		/* Minor is BCD-coded; convert it to decimal */
   14769 		minor = (minor / 16) * 10 + (minor % 16);
   14770 		sc->sc_nvm_ver_major = major;
   14771 		sc->sc_nvm_ver_minor = minor;
   14772 
   14773 printver:
   14774 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14775 		    sc->sc_nvm_ver_minor);
   14776 		if (have_build) {
   14777 			sc->sc_nvm_ver_build = build;
   14778 			aprint_verbose(".%d", build);
   14779 		}
   14780 	}
   14781 
    14782 	/* Assume the Option ROM area is above NVM_SIZE */
   14783 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14784 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14785 		/* Option ROM Version */
   14786 		if ((off != 0x0000) && (off != 0xffff)) {
   14787 			int rv;
   14788 
   14789 			off += NVM_COMBO_VER_OFF;
   14790 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14791 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14792 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14793 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14794 				/* 16bits */
   14795 				major = uid0 >> 8;
   14796 				build = (uid0 << 8) | (uid1 >> 8);
   14797 				patch = uid1 & 0x00ff;
   14798 				aprint_verbose(", option ROM Version %d.%d.%d",
   14799 				    major, build, patch);
   14800 			}
   14801 		}
   14802 	}
   14803 
   14804 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14805 		aprint_verbose(", Image Unique ID %08x",
   14806 		    ((uint32_t)uid1 << 16) | uid0);
   14807 }
   14808 
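/*
 * A worked example of the XYYZ decode above, never compiled, using the
 * 82571 sample 0x50a2 from the table: major = 0x5, minor = 0x0a,
 * build = 0x2, and the BCD-style conversion turns minor 0x0a into
 * decimal 10, giving "5.10.2".
 */
#if 0
static void
wm_example_nvm_version(void)
{
	uint16_t nvm_data = 0x50a2, major, minor, build;

	major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;	/* 0x5 */
	minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;	/* 0x0a */
	build = nvm_data & NVM_BUILD_MASK;			/* 0x2 */
	minor = (minor / 16) * 10 + (minor % 16);		/* -> 10 */
}
#endif
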
   14809 /*
   14810  * wm_nvm_read:
   14811  *
   14812  *	Read data from the serial EEPROM.
   14813  */
   14814 static int
   14815 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14816 {
   14817 	int rv;
   14818 
   14819 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14820 		device_xname(sc->sc_dev), __func__));
   14821 
   14822 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14823 		return -1;
   14824 
   14825 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14826 
   14827 	return rv;
   14828 }
   14829 
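/*
 * A minimal usage sketch for wm_nvm_read(), never compiled, assuming
 * the MAC address occupies three consecutive words starting at
 * NVM_OFF_MACADDR (true of most parts handled here).  Offsets count
 * 16-bit words, not bytes.
 */
#if 0
static void
wm_example_read_macaddr(struct wm_softc *sc)
{
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
		aprint_error_dev(sc->sc_dev,
		    "unable to read Ethernet address\n");
}
#endif
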
   14830 /*
   14831  * Hardware semaphores.
    14832  * Very complex...
   14833  */
   14834 
   14835 static int
   14836 wm_get_null(struct wm_softc *sc)
   14837 {
   14838 
   14839 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14840 		device_xname(sc->sc_dev), __func__));
   14841 	return 0;
   14842 }
   14843 
   14844 static void
   14845 wm_put_null(struct wm_softc *sc)
   14846 {
   14847 
   14848 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14849 		device_xname(sc->sc_dev), __func__));
   14850 	return;
   14851 }
   14852 
   14853 static int
   14854 wm_get_eecd(struct wm_softc *sc)
   14855 {
   14856 	uint32_t reg;
   14857 	int x;
   14858 
   14859 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14860 		device_xname(sc->sc_dev), __func__));
   14861 
   14862 	reg = CSR_READ(sc, WMREG_EECD);
   14863 
   14864 	/* Request EEPROM access. */
   14865 	reg |= EECD_EE_REQ;
   14866 	CSR_WRITE(sc, WMREG_EECD, reg);
   14867 
    14868 	/* ... and wait for it to be granted. */
   14869 	for (x = 0; x < 1000; x++) {
   14870 		reg = CSR_READ(sc, WMREG_EECD);
   14871 		if (reg & EECD_EE_GNT)
   14872 			break;
   14873 		delay(5);
   14874 	}
   14875 	if ((reg & EECD_EE_GNT) == 0) {
   14876 		aprint_error_dev(sc->sc_dev,
   14877 		    "could not acquire EEPROM GNT\n");
   14878 		reg &= ~EECD_EE_REQ;
   14879 		CSR_WRITE(sc, WMREG_EECD, reg);
   14880 		return -1;
   14881 	}
   14882 
   14883 	return 0;
   14884 }
   14885 
   14886 static void
   14887 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14888 {
   14889 
   14890 	*eecd |= EECD_SK;
   14891 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14892 	CSR_WRITE_FLUSH(sc);
   14893 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14894 		delay(1);
   14895 	else
   14896 		delay(50);
   14897 }
   14898 
   14899 static void
   14900 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14901 {
   14902 
   14903 	*eecd &= ~EECD_SK;
   14904 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14905 	CSR_WRITE_FLUSH(sc);
   14906 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14907 		delay(1);
   14908 	else
   14909 		delay(50);
   14910 }
   14911 
   14912 static void
   14913 wm_put_eecd(struct wm_softc *sc)
   14914 {
   14915 	uint32_t reg;
   14916 
   14917 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14918 		device_xname(sc->sc_dev), __func__));
   14919 
   14920 	/* Stop nvm */
   14921 	reg = CSR_READ(sc, WMREG_EECD);
   14922 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14923 		/* Pull CS high */
   14924 		reg |= EECD_CS;
   14925 		wm_nvm_eec_clock_lower(sc, &reg);
   14926 	} else {
   14927 		/* CS on Microwire is active-high */
   14928 		reg &= ~(EECD_CS | EECD_DI);
   14929 		CSR_WRITE(sc, WMREG_EECD, reg);
   14930 		wm_nvm_eec_clock_raise(sc, &reg);
   14931 		wm_nvm_eec_clock_lower(sc, &reg);
   14932 	}
   14933 
   14934 	reg = CSR_READ(sc, WMREG_EECD);
   14935 	reg &= ~EECD_EE_REQ;
   14936 	CSR_WRITE(sc, WMREG_EECD, reg);
   14937 
   14938 	return;
   14939 }
   14940 
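/*
 * A minimal sketch of how the clock helpers above are used to shift
 * one data bit out to the EEPROM, never compiled; the real shift loops
 * live in the Microwire/SPI read routines elsewhere in this file.  DI
 * is sampled by the device on the rising edge of SK.
 */
#if 0
static void
wm_example_shift_out_bit(struct wm_softc *sc, int bit)
{
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	if (bit)
		eecd |= EECD_DI;
	else
		eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, &eecd);
	wm_nvm_eec_clock_lower(sc, &eecd);
}
#endif
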
   14941 /*
   14942  * Get hardware semaphore.
   14943  * Same as e1000_get_hw_semaphore_generic()
   14944  */
   14945 static int
   14946 wm_get_swsm_semaphore(struct wm_softc *sc)
   14947 {
   14948 	int32_t timeout;
   14949 	uint32_t swsm;
   14950 
   14951 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14952 		device_xname(sc->sc_dev), __func__));
   14953 	KASSERT(sc->sc_nvm_wordsize > 0);
   14954 
   14955 retry:
   14956 	/* Get the SW semaphore. */
   14957 	timeout = sc->sc_nvm_wordsize + 1;
   14958 	while (timeout) {
   14959 		swsm = CSR_READ(sc, WMREG_SWSM);
   14960 
   14961 		if ((swsm & SWSM_SMBI) == 0)
   14962 			break;
   14963 
   14964 		delay(50);
   14965 		timeout--;
   14966 	}
   14967 
   14968 	if (timeout == 0) {
   14969 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14970 			/*
   14971 			 * In rare circumstances, the SW semaphore may already
   14972 			 * be held unintentionally. Clear the semaphore once
   14973 			 * before giving up.
   14974 			 */
   14975 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14976 			wm_put_swsm_semaphore(sc);
   14977 			goto retry;
   14978 		}
   14979 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14980 		return -1;
   14981 	}
   14982 
   14983 	/* Get the FW semaphore. */
   14984 	timeout = sc->sc_nvm_wordsize + 1;
   14985 	while (timeout) {
   14986 		swsm = CSR_READ(sc, WMREG_SWSM);
   14987 		swsm |= SWSM_SWESMBI;
   14988 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14989 		/* If we managed to set the bit we got the semaphore. */
   14990 		swsm = CSR_READ(sc, WMREG_SWSM);
   14991 		if (swsm & SWSM_SWESMBI)
   14992 			break;
   14993 
   14994 		delay(50);
   14995 		timeout--;
   14996 	}
   14997 
   14998 	if (timeout == 0) {
   14999 		aprint_error_dev(sc->sc_dev,
   15000 		    "could not acquire SWSM SWESMBI\n");
   15001 		/* Release semaphores */
   15002 		wm_put_swsm_semaphore(sc);
   15003 		return -1;
   15004 	}
   15005 	return 0;
   15006 }
   15007 
   15008 /*
   15009  * Put hardware semaphore.
   15010  * Same as e1000_put_hw_semaphore_generic()
   15011  */
   15012 static void
   15013 wm_put_swsm_semaphore(struct wm_softc *sc)
   15014 {
   15015 	uint32_t swsm;
   15016 
   15017 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15018 		device_xname(sc->sc_dev), __func__));
   15019 
   15020 	swsm = CSR_READ(sc, WMREG_SWSM);
   15021 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15022 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15023 }
   15024 
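/*
 * A minimal sketch of the expected pairing for the two-stage protocol
 * above (SMBI first, then SWESMBI), never compiled.  Both bits are
 * held only across the protected operation and are dropped together by
 * wm_put_swsm_semaphore().
 */
#if 0
static int
wm_example_swsm_user(struct wm_softc *sc)
{

	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;
	/* ... access the resource shared with the firmware ... */
	wm_put_swsm_semaphore(sc);
	return 0;
}
#endif
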
   15025 /*
   15026  * Get SW/FW semaphore.
   15027  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   15028  */
   15029 static int
   15030 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15031 {
   15032 	uint32_t swfw_sync;
   15033 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15034 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15035 	int timeout;
   15036 
   15037 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15038 		device_xname(sc->sc_dev), __func__));
   15039 
   15040 	if (sc->sc_type == WM_T_80003)
   15041 		timeout = 50;
   15042 	else
   15043 		timeout = 200;
   15044 
   15045 	while (timeout) {
   15046 		if (wm_get_swsm_semaphore(sc)) {
   15047 			aprint_error_dev(sc->sc_dev,
   15048 			    "%s: failed to get semaphore\n",
   15049 			    __func__);
   15050 			return -1;
   15051 		}
   15052 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15053 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15054 			swfw_sync |= swmask;
   15055 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15056 			wm_put_swsm_semaphore(sc);
   15057 			return 0;
   15058 		}
   15059 		wm_put_swsm_semaphore(sc);
   15060 		delay(5000);
   15061 		timeout--;
   15062 	}
   15063 	device_printf(sc->sc_dev,
   15064 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15065 	    mask, swfw_sync);
   15066 	return -1;
   15067 }
   15068 
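/*
 * A minimal usage sketch, never compiled: claim this function's PHY
 * resource before MDIC access, then release it.  The mask names one
 * SWFW_* software resource bit; the firmware owns the corresponding FW
 * bit, and the SWSM semaphore above only guards the SW_FW_SYNC
 * register update itself.
 */
#if 0
static int
wm_example_swfw_user(struct wm_softc *sc)
{

	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]) != 0)
		return -1;
	/* ... MDIC PHY register access ... */
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
	return 0;
}
#endif
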
   15069 static void
   15070 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15071 {
   15072 	uint32_t swfw_sync;
   15073 
   15074 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15075 		device_xname(sc->sc_dev), __func__));
   15076 
   15077 	while (wm_get_swsm_semaphore(sc) != 0)
   15078 		continue;
   15079 
   15080 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15081 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15082 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15083 
   15084 	wm_put_swsm_semaphore(sc);
   15085 }
   15086 
   15087 static int
   15088 wm_get_nvm_80003(struct wm_softc *sc)
   15089 {
   15090 	int rv;
   15091 
   15092 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15093 		device_xname(sc->sc_dev), __func__));
   15094 
   15095 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15096 		aprint_error_dev(sc->sc_dev,
   15097 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15098 		return rv;
   15099 	}
   15100 
   15101 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15102 	    && (rv = wm_get_eecd(sc)) != 0) {
   15103 		aprint_error_dev(sc->sc_dev,
   15104 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15105 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15106 		return rv;
   15107 	}
   15108 
   15109 	return 0;
   15110 }
   15111 
   15112 static void
   15113 wm_put_nvm_80003(struct wm_softc *sc)
   15114 {
   15115 
   15116 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15117 		device_xname(sc->sc_dev), __func__));
   15118 
   15119 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15120 		wm_put_eecd(sc);
   15121 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15122 }
   15123 
   15124 static int
   15125 wm_get_nvm_82571(struct wm_softc *sc)
   15126 {
   15127 	int rv;
   15128 
   15129 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15130 		device_xname(sc->sc_dev), __func__));
   15131 
   15132 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15133 		return rv;
   15134 
   15135 	switch (sc->sc_type) {
   15136 	case WM_T_82573:
   15137 		break;
   15138 	default:
   15139 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15140 			rv = wm_get_eecd(sc);
   15141 		break;
   15142 	}
   15143 
   15144 	if (rv != 0) {
   15145 		aprint_error_dev(sc->sc_dev,
   15146 		    "%s: failed to get semaphore\n",
   15147 		    __func__);
   15148 		wm_put_swsm_semaphore(sc);
   15149 	}
   15150 
   15151 	return rv;
   15152 }
   15153 
   15154 static void
   15155 wm_put_nvm_82571(struct wm_softc *sc)
   15156 {
   15157 
   15158 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15159 		device_xname(sc->sc_dev), __func__));
   15160 
   15161 	switch (sc->sc_type) {
   15162 	case WM_T_82573:
   15163 		break;
   15164 	default:
   15165 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15166 			wm_put_eecd(sc);
   15167 		break;
   15168 	}
   15169 
   15170 	wm_put_swsm_semaphore(sc);
   15171 }
   15172 
   15173 static int
   15174 wm_get_phy_82575(struct wm_softc *sc)
   15175 {
   15176 
   15177 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15178 		device_xname(sc->sc_dev), __func__));
   15179 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15180 }
   15181 
   15182 static void
   15183 wm_put_phy_82575(struct wm_softc *sc)
   15184 {
   15185 
   15186 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15187 		device_xname(sc->sc_dev), __func__));
   15188 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15189 }
   15190 
   15191 static int
   15192 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15193 {
   15194 	uint32_t ext_ctrl;
   15195 	int timeout = 200;
   15196 
   15197 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15198 		device_xname(sc->sc_dev), __func__));
   15199 
   15200 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15201 	for (timeout = 0; timeout < 200; timeout++) {
   15202 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15203 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15204 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15205 
   15206 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15207 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15208 			return 0;
   15209 		delay(5000);
   15210 	}
   15211 	device_printf(sc->sc_dev,
   15212 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15213 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15214 	return -1;
   15215 }
   15216 
   15217 static void
   15218 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15219 {
   15220 	uint32_t ext_ctrl;
   15221 
   15222 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15223 		device_xname(sc->sc_dev), __func__));
   15224 
   15225 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15226 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15227 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15228 
   15229 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15230 }
   15231 
   15232 static int
   15233 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15234 {
   15235 	uint32_t ext_ctrl;
   15236 	int timeout;
   15237 
   15238 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15239 		device_xname(sc->sc_dev), __func__));
   15240 	mutex_enter(sc->sc_ich_phymtx);
   15241 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15242 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15243 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15244 			break;
   15245 		delay(1000);
   15246 	}
   15247 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15248 		device_printf(sc->sc_dev,
   15249 		    "SW has already locked the resource\n");
   15250 		goto out;
   15251 	}
   15252 
   15253 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15254 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15255 	for (timeout = 0; timeout < 1000; timeout++) {
   15256 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15257 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15258 			break;
   15259 		delay(1000);
   15260 	}
   15261 	if (timeout >= 1000) {
   15262 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15263 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15264 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15265 		goto out;
   15266 	}
   15267 	return 0;
   15268 
   15269 out:
   15270 	mutex_exit(sc->sc_ich_phymtx);
   15271 	return -1;
   15272 }
   15273 
   15274 static void
   15275 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15276 {
   15277 	uint32_t ext_ctrl;
   15278 
   15279 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15280 		device_xname(sc->sc_dev), __func__));
   15281 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15282 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15283 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15284 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15285 	} else
   15286 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15287 
   15288 	mutex_exit(sc->sc_ich_phymtx);
   15289 }
   15290 
   15291 static int
   15292 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15293 {
   15294 
   15295 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15296 		device_xname(sc->sc_dev), __func__));
   15297 	mutex_enter(sc->sc_ich_nvmmtx);
   15298 
   15299 	return 0;
   15300 }
   15301 
   15302 static void
   15303 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15304 {
   15305 
   15306 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15307 		device_xname(sc->sc_dev), __func__));
   15308 	mutex_exit(sc->sc_ich_nvmmtx);
   15309 }
   15310 
   15311 static int
   15312 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15313 {
   15314 	int i = 0;
   15315 	uint32_t reg;
   15316 
   15317 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15318 		device_xname(sc->sc_dev), __func__));
   15319 
   15320 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15321 	do {
   15322 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15323 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15324 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15325 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15326 			break;
   15327 		delay(2*1000);
   15328 		i++;
   15329 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15330 
   15331 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15332 		wm_put_hw_semaphore_82573(sc);
   15333 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15334 		    device_xname(sc->sc_dev));
   15335 		return -1;
   15336 	}
   15337 
   15338 	return 0;
   15339 }
   15340 
   15341 static void
   15342 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15343 {
   15344 	uint32_t reg;
   15345 
   15346 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15347 		device_xname(sc->sc_dev), __func__));
   15348 
   15349 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15350 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15351 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15352 }
   15353 
   15354 /*
   15355  * Management mode and power management related subroutines.
   15356  * BMC, AMT, suspend/resume and EEE.
   15357  */
   15358 
   15359 #ifdef WM_WOL
   15360 static int
   15361 wm_check_mng_mode(struct wm_softc *sc)
   15362 {
   15363 	int rv;
   15364 
   15365 	switch (sc->sc_type) {
   15366 	case WM_T_ICH8:
   15367 	case WM_T_ICH9:
   15368 	case WM_T_ICH10:
   15369 	case WM_T_PCH:
   15370 	case WM_T_PCH2:
   15371 	case WM_T_PCH_LPT:
   15372 	case WM_T_PCH_SPT:
   15373 	case WM_T_PCH_CNP:
   15374 		rv = wm_check_mng_mode_ich8lan(sc);
   15375 		break;
   15376 	case WM_T_82574:
   15377 	case WM_T_82583:
   15378 		rv = wm_check_mng_mode_82574(sc);
   15379 		break;
   15380 	case WM_T_82571:
   15381 	case WM_T_82572:
   15382 	case WM_T_82573:
   15383 	case WM_T_80003:
   15384 		rv = wm_check_mng_mode_generic(sc);
   15385 		break;
   15386 	default:
    15387 		/* Nothing to do */
   15388 		rv = 0;
   15389 		break;
   15390 	}
   15391 
   15392 	return rv;
   15393 }
   15394 
   15395 static int
   15396 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15397 {
   15398 	uint32_t fwsm;
   15399 
   15400 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15401 
   15402 	if (((fwsm & FWSM_FW_VALID) != 0)
   15403 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15404 		return 1;
   15405 
   15406 	return 0;
   15407 }
   15408 
   15409 static int
   15410 wm_check_mng_mode_82574(struct wm_softc *sc)
   15411 {
   15412 	uint16_t data;
   15413 
   15414 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15415 
   15416 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15417 		return 1;
   15418 
   15419 	return 0;
   15420 }
   15421 
   15422 static int
   15423 wm_check_mng_mode_generic(struct wm_softc *sc)
   15424 {
   15425 	uint32_t fwsm;
   15426 
   15427 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15428 
   15429 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15430 		return 1;
   15431 
   15432 	return 0;
   15433 }
   15434 #endif /* WM_WOL */
   15435 
   15436 static int
   15437 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15438 {
   15439 	uint32_t manc, fwsm, factps;
   15440 
   15441 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15442 		return 0;
   15443 
   15444 	manc = CSR_READ(sc, WMREG_MANC);
   15445 
   15446 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15447 		device_xname(sc->sc_dev), manc));
   15448 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15449 		return 0;
   15450 
   15451 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15452 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15453 		factps = CSR_READ(sc, WMREG_FACTPS);
   15454 		if (((factps & FACTPS_MNGCG) == 0)
   15455 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15456 			return 1;
   15457 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15458 		uint16_t data;
   15459 
   15460 		factps = CSR_READ(sc, WMREG_FACTPS);
   15461 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15462 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15463 			device_xname(sc->sc_dev), factps, data));
   15464 		if (((factps & FACTPS_MNGCG) == 0)
   15465 		    && ((data & NVM_CFG2_MNGM_MASK)
   15466 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15467 			return 1;
   15468 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15469 	    && ((manc & MANC_ASF_EN) == 0))
   15470 		return 1;
   15471 
   15472 	return 0;
   15473 }
   15474 
   15475 static bool
   15476 wm_phy_resetisblocked(struct wm_softc *sc)
   15477 {
   15478 	bool blocked = false;
   15479 	uint32_t reg;
   15480 	int i = 0;
   15481 
   15482 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15483 		device_xname(sc->sc_dev), __func__));
   15484 
   15485 	switch (sc->sc_type) {
   15486 	case WM_T_ICH8:
   15487 	case WM_T_ICH9:
   15488 	case WM_T_ICH10:
   15489 	case WM_T_PCH:
   15490 	case WM_T_PCH2:
   15491 	case WM_T_PCH_LPT:
   15492 	case WM_T_PCH_SPT:
   15493 	case WM_T_PCH_CNP:
   15494 		do {
   15495 			reg = CSR_READ(sc, WMREG_FWSM);
   15496 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15497 				blocked = true;
   15498 				delay(10*1000);
   15499 				continue;
   15500 			}
   15501 			blocked = false;
   15502 		} while (blocked && (i++ < 30));
   15503 		return blocked;
   15504 		break;
   15505 	case WM_T_82571:
   15506 	case WM_T_82572:
   15507 	case WM_T_82573:
   15508 	case WM_T_82574:
   15509 	case WM_T_82583:
   15510 	case WM_T_80003:
   15511 		reg = CSR_READ(sc, WMREG_MANC);
   15512 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15513 			return true;
   15514 		else
   15515 			return false;
   15516 		break;
   15517 	default:
   15518 		/* No problem */
   15519 		break;
   15520 	}
   15521 
   15522 	return false;
   15523 }
   15524 
   15525 static void
   15526 wm_get_hw_control(struct wm_softc *sc)
   15527 {
   15528 	uint32_t reg;
   15529 
   15530 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15531 		device_xname(sc->sc_dev), __func__));
   15532 
   15533 	if (sc->sc_type == WM_T_82573) {
   15534 		reg = CSR_READ(sc, WMREG_SWSM);
   15535 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15536 	} else if (sc->sc_type >= WM_T_82571) {
   15537 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15538 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15539 	}
   15540 }
   15541 
   15542 static void
   15543 wm_release_hw_control(struct wm_softc *sc)
   15544 {
   15545 	uint32_t reg;
   15546 
   15547 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15548 		device_xname(sc->sc_dev), __func__));
   15549 
   15550 	if (sc->sc_type == WM_T_82573) {
   15551 		reg = CSR_READ(sc, WMREG_SWSM);
   15552 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15553 	} else if (sc->sc_type >= WM_T_82571) {
   15554 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15555 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15556 	}
   15557 }
   15558 
   15559 static void
   15560 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15561 {
   15562 	uint32_t reg;
   15563 
   15564 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15565 		device_xname(sc->sc_dev), __func__));
   15566 
   15567 	if (sc->sc_type < WM_T_PCH2)
   15568 		return;
   15569 
   15570 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15571 
   15572 	if (gate)
   15573 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15574 	else
   15575 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15576 
   15577 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15578 }
   15579 
   15580 static int
   15581 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15582 {
   15583 	uint32_t fwsm, reg;
   15584 	int rv;
   15585 
   15586 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15587 		device_xname(sc->sc_dev), __func__));
   15588 
   15589 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15590 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15591 
   15592 	/* Disable ULP */
   15593 	wm_ulp_disable(sc);
   15594 
   15595 	/* Acquire PHY semaphore */
   15596 	rv = sc->phy.acquire(sc);
   15597 	if (rv != 0) {
   15598 		DPRINTF(sc, WM_DEBUG_INIT,
   15599 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15600 		return rv;
   15601 	}
   15602 
   15603 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15604 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15605 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15606 	 */
   15607 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15608 	switch (sc->sc_type) {
   15609 	case WM_T_PCH_LPT:
   15610 	case WM_T_PCH_SPT:
   15611 	case WM_T_PCH_CNP:
   15612 		if (wm_phy_is_accessible_pchlan(sc))
   15613 			break;
   15614 
   15615 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15616 		 * forcing MAC to SMBus mode first.
   15617 		 */
   15618 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15619 		reg |= CTRL_EXT_FORCE_SMBUS;
   15620 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15621 #if 0
   15622 		/* XXX Isn't this required??? */
   15623 		CSR_WRITE_FLUSH(sc);
   15624 #endif
   15625 		/* Wait 50 milliseconds for MAC to finish any retries
   15626 		 * that it might be trying to perform from previous
   15627 		 * attempts to acknowledge any phy read requests.
   15628 		 */
   15629 		delay(50 * 1000);
   15630 		/* FALLTHROUGH */
   15631 	case WM_T_PCH2:
   15632 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15633 			break;
   15634 		/* FALLTHROUGH */
   15635 	case WM_T_PCH:
   15636 		if (sc->sc_type == WM_T_PCH)
   15637 			if ((fwsm & FWSM_FW_VALID) != 0)
   15638 				break;
   15639 
   15640 		if (wm_phy_resetisblocked(sc) == true) {
   15641 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15642 			break;
   15643 		}
   15644 
   15645 		/* Toggle LANPHYPC Value bit */
   15646 		wm_toggle_lanphypc_pch_lpt(sc);
   15647 
   15648 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15649 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15650 				break;
   15651 
   15652 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15653 			 * so ensure that the MAC is also out of SMBus mode
   15654 			 */
   15655 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15656 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15657 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15658 
   15659 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15660 				break;
   15661 			rv = -1;
   15662 		}
   15663 		break;
   15664 	default:
   15665 		break;
   15666 	}
   15667 
   15668 	/* Release semaphore */
   15669 	sc->phy.release(sc);
   15670 
   15671 	if (rv == 0) {
   15672 		/* Check to see if able to reset PHY.  Print error if not */
   15673 		if (wm_phy_resetisblocked(sc)) {
   15674 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15675 			goto out;
   15676 		}
   15677 
    15678 		/* Reset the PHY before any access to it.  Doing so ensures
   15679 		 * that the PHY is in a known good state before we read/write
   15680 		 * PHY registers.  The generic reset is sufficient here,
   15681 		 * because we haven't determined the PHY type yet.
   15682 		 */
   15683 		if (wm_reset_phy(sc) != 0)
   15684 			goto out;
   15685 
   15686 		/* On a successful reset, possibly need to wait for the PHY
   15687 		 * to quiesce to an accessible state before returning control
   15688 		 * to the calling function.  If the PHY does not quiesce, then
   15689 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15690 		 * the PHY is in.
   15691 		 */
   15692 		if (wm_phy_resetisblocked(sc))
   15693 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15694 	}
   15695 
   15696 out:
   15697 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15698 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15699 		delay(10*1000);
   15700 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15701 	}
   15702 
   15703 	return 0;
   15704 }
   15705 
   15706 static void
   15707 wm_init_manageability(struct wm_softc *sc)
   15708 {
   15709 
   15710 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15711 		device_xname(sc->sc_dev), __func__));
   15712 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15713 
   15714 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15715 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15716 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15717 
   15718 		/* Disable hardware interception of ARP */
   15719 		manc &= ~MANC_ARP_EN;
   15720 
   15721 		/* Enable receiving management packets to the host */
   15722 		if (sc->sc_type >= WM_T_82571) {
   15723 			manc |= MANC_EN_MNG2HOST;
   15724 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15725 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15726 		}
   15727 
   15728 		CSR_WRITE(sc, WMREG_MANC, manc);
   15729 	}
   15730 }
   15731 
   15732 static void
   15733 wm_release_manageability(struct wm_softc *sc)
   15734 {
   15735 
   15736 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15737 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15738 
   15739 		manc |= MANC_ARP_EN;
   15740 		if (sc->sc_type >= WM_T_82571)
   15741 			manc &= ~MANC_EN_MNG2HOST;
   15742 
   15743 		CSR_WRITE(sc, WMREG_MANC, manc);
   15744 	}
   15745 }
   15746 
   15747 static void
   15748 wm_get_wakeup(struct wm_softc *sc)
   15749 {
   15750 
   15751 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15752 	switch (sc->sc_type) {
   15753 	case WM_T_82573:
   15754 	case WM_T_82583:
   15755 		sc->sc_flags |= WM_F_HAS_AMT;
   15756 		/* FALLTHROUGH */
   15757 	case WM_T_80003:
   15758 	case WM_T_82575:
   15759 	case WM_T_82576:
   15760 	case WM_T_82580:
   15761 	case WM_T_I350:
   15762 	case WM_T_I354:
   15763 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15764 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15765 		/* FALLTHROUGH */
   15766 	case WM_T_82541:
   15767 	case WM_T_82541_2:
   15768 	case WM_T_82547:
   15769 	case WM_T_82547_2:
   15770 	case WM_T_82571:
   15771 	case WM_T_82572:
   15772 	case WM_T_82574:
   15773 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15774 		break;
   15775 	case WM_T_ICH8:
   15776 	case WM_T_ICH9:
   15777 	case WM_T_ICH10:
   15778 	case WM_T_PCH:
   15779 	case WM_T_PCH2:
   15780 	case WM_T_PCH_LPT:
   15781 	case WM_T_PCH_SPT:
   15782 	case WM_T_PCH_CNP:
   15783 		sc->sc_flags |= WM_F_HAS_AMT;
   15784 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15785 		break;
   15786 	default:
   15787 		break;
   15788 	}
   15789 
   15790 	/* 1: HAS_MANAGE */
   15791 	if (wm_enable_mng_pass_thru(sc) != 0)
   15792 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15793 
   15794 	/*
    15795 	 * Note that the WOL flags are set after the EEPROM stuff is
    15796 	 * reset.
   15797 	 */
   15798 }
   15799 
   15800 /*
   15801  * Unconfigure Ultra Low Power mode.
   15802  * Only for I217 and newer (see below).
   15803  */
   15804 static int
   15805 wm_ulp_disable(struct wm_softc *sc)
   15806 {
   15807 	uint32_t reg;
   15808 	uint16_t phyreg;
   15809 	int i = 0, rv;
   15810 
   15811 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15812 		device_xname(sc->sc_dev), __func__));
   15813 	/* Exclude old devices */
   15814 	if ((sc->sc_type < WM_T_PCH_LPT)
   15815 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15816 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15817 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15818 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15819 		return 0;
   15820 
   15821 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15822 		/* Request ME un-configure ULP mode in the PHY */
   15823 		reg = CSR_READ(sc, WMREG_H2ME);
   15824 		reg &= ~H2ME_ULP;
   15825 		reg |= H2ME_ENFORCE_SETTINGS;
   15826 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15827 
   15828 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15829 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15830 			if (i++ == 30) {
   15831 				device_printf(sc->sc_dev, "%s timed out\n",
   15832 				    __func__);
   15833 				return -1;
   15834 			}
   15835 			delay(10 * 1000);
   15836 		}
   15837 		reg = CSR_READ(sc, WMREG_H2ME);
   15838 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15839 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15840 
   15841 		return 0;
   15842 	}
   15843 
   15844 	/* Acquire semaphore */
   15845 	rv = sc->phy.acquire(sc);
   15846 	if (rv != 0) {
   15847 		DPRINTF(sc, WM_DEBUG_INIT,
   15848 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15849 		return rv;
   15850 	}
   15851 
   15852 	/* Toggle LANPHYPC */
   15853 	wm_toggle_lanphypc_pch_lpt(sc);
   15854 
   15855 	/* Unforce SMBus mode in PHY */
   15856 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15857 	if (rv != 0) {
   15858 		uint32_t reg2;
   15859 
   15860 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15861 		    __func__);
   15862 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15863 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15864 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15865 		delay(50 * 1000);
   15866 
   15867 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15868 		    &phyreg);
   15869 		if (rv != 0)
   15870 			goto release;
   15871 	}
   15872 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15873 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15874 
   15875 	/* Unforce SMBus mode in MAC */
   15876 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15877 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15878 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15879 
   15880 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15881 	if (rv != 0)
   15882 		goto release;
   15883 	phyreg |= HV_PM_CTRL_K1_ENA;
   15884 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15885 
   15886 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15887 	    &phyreg);
   15888 	if (rv != 0)
   15889 		goto release;
   15890 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15891 	    | I218_ULP_CONFIG1_STICKY_ULP
   15892 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15893 	    | I218_ULP_CONFIG1_WOL_HOST
   15894 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15895 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15896 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15897 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15898 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15899 	phyreg |= I218_ULP_CONFIG1_START;
   15900 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15901 
   15902 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15903 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15904 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15905 
   15906 release:
   15907 	/* Release semaphore */
   15908 	sc->phy.release(sc);
   15909 	wm_gmii_reset(sc);
   15910 	delay(50 * 1000);
   15911 
   15912 	return rv;
   15913 }
   15914 
   15915 /* WOL in the newer chipset interfaces (pchlan) */
   15916 static int
   15917 wm_enable_phy_wakeup(struct wm_softc *sc)
   15918 {
   15919 	device_t dev = sc->sc_dev;
   15920 	uint32_t mreg, moff;
   15921 	uint16_t wuce, wuc, wufc, preg;
   15922 	int i, rv;
   15923 
   15924 	KASSERT(sc->sc_type >= WM_T_PCH);
   15925 
   15926 	/* Copy MAC RARs to PHY RARs */
   15927 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15928 
   15929 	/* Activate PHY wakeup */
   15930 	rv = sc->phy.acquire(sc);
   15931 	if (rv != 0) {
   15932 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15933 		    __func__);
   15934 		return rv;
   15935 	}
   15936 
   15937 	/*
   15938 	 * Enable access to PHY wakeup registers.
   15939 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15940 	 */
   15941 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15942 	if (rv != 0) {
   15943 		device_printf(dev,
   15944 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15945 		goto release;
   15946 	}
   15947 
   15948 	/* Copy MAC MTA to PHY MTA */
   15949 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15950 		uint16_t lo, hi;
   15951 
   15952 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15953 		lo = (uint16_t)(mreg & 0xffff);
   15954 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15955 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15956 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15957 	}
   15958 
   15959 	/* Configure PHY Rx Control register */
   15960 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15961 	mreg = CSR_READ(sc, WMREG_RCTL);
   15962 	if (mreg & RCTL_UPE)
   15963 		preg |= BM_RCTL_UPE;
   15964 	if (mreg & RCTL_MPE)
   15965 		preg |= BM_RCTL_MPE;
   15966 	preg &= ~(BM_RCTL_MO_MASK);
   15967 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15968 	if (moff != 0)
   15969 		preg |= moff << BM_RCTL_MO_SHIFT;
   15970 	if (mreg & RCTL_BAM)
   15971 		preg |= BM_RCTL_BAM;
   15972 	if (mreg & RCTL_PMCF)
   15973 		preg |= BM_RCTL_PMCF;
   15974 	mreg = CSR_READ(sc, WMREG_CTRL);
   15975 	if (mreg & CTRL_RFCE)
   15976 		preg |= BM_RCTL_RFCE;
   15977 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15978 
   15979 	wuc = WUC_APME | WUC_PME_EN;
   15980 	wufc = WUFC_MAG;
   15981 	/* Enable PHY wakeup in MAC register */
   15982 	CSR_WRITE(sc, WMREG_WUC,
   15983 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15984 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15985 
   15986 	/* Configure and enable PHY wakeup in PHY registers */
   15987 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15988 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15989 
   15990 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15991 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15992 
   15993 release:
   15994 	sc->phy.release(sc);
   15995 
    15996 	return rv;
   15997 }
   15998 
   15999 /* Power down workaround on D3 */
   16000 static void
   16001 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16002 {
   16003 	uint32_t reg;
   16004 	uint16_t phyreg;
   16005 	int i;
   16006 
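	/*
	 * Try at most twice: if shutdown mode does not stick on the
	 * first pass, issue the PHY reset at the bottom of the loop
	 * and repeat once more.
	 */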
   16007 	for (i = 0; i < 2; i++) {
   16008 		/* Disable link */
   16009 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16010 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16011 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16012 
   16013 		/*
   16014 		 * Call gig speed drop workaround on Gig disable before
   16015 		 * accessing any PHY registers
   16016 		 */
   16017 		if (sc->sc_type == WM_T_ICH8)
   16018 			wm_gig_downshift_workaround_ich8lan(sc);
   16019 
   16020 		/* Write VR power-down enable */
   16021 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16022 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16023 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16024 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16025 
   16026 		/* Read it back and test */
   16027 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16028 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16029 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16030 			break;
   16031 
   16032 		/* Issue PHY reset and repeat at most one more time */
   16033 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16034 	}
   16035 }
   16036 
   16037 /*
   16038  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16039  *  @sc: pointer to the HW structure
   16040  *
   16041  *  During S0 to Sx transition, it is possible the link remains at gig
   16042  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16043  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16044  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16045  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16046  *  needs to be written.
    16047  *  Parts that support (and are linked to a partner which supports) EEE in
   16048  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16049  *  than 10Mbps w/o EEE.
   16050  */
   16051 static void
   16052 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16053 {
   16054 	device_t dev = sc->sc_dev;
   16055 	struct ethercom *ec = &sc->sc_ethercom;
   16056 	uint32_t phy_ctrl;
   16057 	int rv;
   16058 
   16059 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16060 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16061 
   16062 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16063 
   16064 	if (sc->sc_phytype == WMPHY_I217) {
   16065 		uint16_t devid = sc->sc_pcidevid;
   16066 
   16067 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16068 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16069 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16070 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16071 		    (sc->sc_type >= WM_T_PCH_SPT))
   16072 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16073 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16074 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16075 
   16076 		if (sc->phy.acquire(sc) != 0)
   16077 			goto out;
   16078 
   16079 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16080 			uint16_t eee_advert;
   16081 
   16082 			rv = wm_read_emi_reg_locked(dev,
   16083 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16084 			if (rv)
   16085 				goto release;
   16086 
   16087 			/*
   16088 			 * Disable LPLU if both link partners support 100BaseT
   16089 			 * EEE and 100Full is advertised on both ends of the
   16090 			 * link, and enable Auto Enable LPI since there will
   16091 			 * be no driver to enable LPI while in Sx.
   16092 			 */
   16093 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16094 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16095 				uint16_t anar, phy_reg;
   16096 
   16097 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16098 				    &anar);
   16099 				if (anar & ANAR_TX_FD) {
   16100 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16101 					    PHY_CTRL_NOND0A_LPLU);
   16102 
   16103 					/* Set Auto Enable LPI after link up */
   16104 					sc->phy.readreg_locked(dev, 2,
   16105 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16106 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16107 					sc->phy.writereg_locked(dev, 2,
   16108 					    I217_LPI_GPIO_CTRL, phy_reg);
   16109 				}
   16110 			}
   16111 		}
   16112 
   16113 		/*
   16114 		 * For i217 Intel Rapid Start Technology support,
   16115 		 * when the system is going into Sx and no manageability engine
   16116 		 * is present, the driver must configure proxy to reset only on
   16117 		 * power good.	LPI (Low Power Idle) state must also reset only
   16118 		 * on power good, as well as the MTA (Multicast table array).
   16119 		 * The SMBus release must also be disabled on LCD reset.
   16120 		 */
   16121 
   16122 		/*
   16123 		 * Enable MTA to reset for Intel Rapid Start Technology
   16124 		 * Support
   16125 		 */
   16126 
   16127 release:
   16128 		sc->phy.release(sc);
   16129 	}
   16130 out:
   16131 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16132 
   16133 	if (sc->sc_type == WM_T_ICH8)
   16134 		wm_gig_downshift_workaround_ich8lan(sc);
   16135 
   16136 	if (sc->sc_type >= WM_T_PCH) {
   16137 		wm_oem_bits_config_ich8lan(sc, false);
   16138 
   16139 		/* Reset PHY to activate OEM bits on 82577/8 */
   16140 		if (sc->sc_type == WM_T_PCH)
   16141 			wm_reset_phy(sc);
   16142 
   16143 		if (sc->phy.acquire(sc) != 0)
   16144 			return;
   16145 		wm_write_smbus_addr(sc);
   16146 		sc->phy.release(sc);
   16147 	}
   16148 }
   16149 
   16150 /*
   16151  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16152  *  @sc: pointer to the HW structure
   16153  *
   16154  *  During Sx to S0 transitions on non-managed devices or managed devices
   16155  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16156  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16157  *  the PHY.
   16158  *  On i217, setup Intel Rapid Start Technology.
   16159  */
   16160 static int
   16161 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16162 {
   16163 	device_t dev = sc->sc_dev;
   16164 	int rv;
   16165 
   16166 	if (sc->sc_type < WM_T_PCH2)
   16167 		return 0;
   16168 
   16169 	rv = wm_init_phy_workarounds_pchlan(sc);
   16170 	if (rv != 0)
   16171 		return rv;
   16172 
    16173 	/* For i217 Intel Rapid Start Technology support: when the system
    16174 	 * is transitioning from Sx and no manageability engine is present,
    16175 	 * configure SMBus to restore on reset, disable proxy, and enable
   16176 	 * the reset on MTA (Multicast table array).
   16177 	 */
   16178 	if (sc->sc_phytype == WMPHY_I217) {
   16179 		uint16_t phy_reg;
   16180 
   16181 		rv = sc->phy.acquire(sc);
   16182 		if (rv != 0)
   16183 			return rv;
   16184 
   16185 		/* Clear Auto Enable LPI after link up */
   16186 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16187 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16188 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16189 
   16190 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16191 			/* Restore clear on SMB if no manageability engine
   16192 			 * is present
   16193 			 */
   16194 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16195 			    &phy_reg);
   16196 			if (rv != 0)
   16197 				goto release;
   16198 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16199 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16200 
   16201 			/* Disable Proxy */
   16202 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16203 		}
   16204 		/* Enable reset on MTA */
    16205 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16206 		if (rv != 0)
   16207 			goto release;
   16208 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16209 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16210 
   16211 release:
   16212 		sc->phy.release(sc);
   16213 		return rv;
   16214 	}
   16215 
   16216 	return 0;
   16217 }
   16218 
   16219 static void
   16220 wm_enable_wakeup(struct wm_softc *sc)
   16221 {
   16222 	uint32_t reg, pmreg;
   16223 	pcireg_t pmode;
   16224 	int rv = 0;
   16225 
   16226 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16227 		device_xname(sc->sc_dev), __func__));
   16228 
   16229 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16230 	    &pmreg, NULL) == 0)
   16231 		return;
   16232 
   16233 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16234 		goto pme;
   16235 
   16236 	/* Advertise the wakeup capability */
   16237 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16238 	    | CTRL_SWDPIN(3));
   16239 
   16240 	/* Keep the laser running on fiber adapters */
   16241 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16242 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16243 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16244 		reg |= CTRL_EXT_SWDPIN(3);
   16245 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16246 	}
   16247 
   16248 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16249 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16250 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16251 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16252 		wm_suspend_workarounds_ich8lan(sc);
   16253 
   16254 #if 0	/* For the multicast packet */
   16255 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16256 	reg |= WUFC_MC;
   16257 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16258 #endif
   16259 
   16260 	if (sc->sc_type >= WM_T_PCH) {
   16261 		rv = wm_enable_phy_wakeup(sc);
   16262 		if (rv != 0)
   16263 			goto pme;
   16264 	} else {
   16265 		/* Enable wakeup by the MAC */
   16266 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16267 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16268 	}
   16269 
   16270 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16271 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16272 		|| (sc->sc_type == WM_T_PCH2))
   16273 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16274 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16275 
   16276 pme:
   16277 	/* Request PME */
   16278 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16279 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16280 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16281 		/* For WOL */
   16282 		pmode |= PCI_PMCSR_PME_EN;
   16283 	} else {
   16284 		/* Disable WOL */
   16285 		pmode &= ~PCI_PMCSR_PME_EN;
   16286 	}
   16287 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16288 }
   16289 
   16290 /* Disable ASPM L0s and/or L1 for workaround */
   16291 static void
   16292 wm_disable_aspm(struct wm_softc *sc)
   16293 {
   16294 	pcireg_t reg, mask = 0;
    16295 	const char *str = "";
   16296 
   16297 	/*
    16298 	 * Only for PCIe devices which have the PCIe capability in the
    16299 	 * PCI config space.
   16300 	 */
   16301 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16302 		return;
   16303 
   16304 	switch (sc->sc_type) {
   16305 	case WM_T_82571:
   16306 	case WM_T_82572:
   16307 		/*
   16308 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16309 		 * State Power management L1 State (ASPM L1).
   16310 		 */
   16311 		mask = PCIE_LCSR_ASPM_L1;
   16312 		str = "L1 is";
   16313 		break;
   16314 	case WM_T_82573:
   16315 	case WM_T_82574:
   16316 	case WM_T_82583:
   16317 		/*
   16318 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16319 		 *
    16320 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16321 		 * some chipsets.  The documents for the 82574 and 82583 say
    16322 		 * that disabling L0s on those specific chipsets is
    16323 		 * sufficient, but we follow what the Intel em driver does.
   16324 		 *
   16325 		 * References:
   16326 		 * Errata 8 of the Specification Update of i82573.
   16327 		 * Errata 20 of the Specification Update of i82574.
   16328 		 * Errata 9 of the Specification Update of i82583.
   16329 		 */
   16330 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16331 		str = "L0s and L1 are";
   16332 		break;
   16333 	default:
   16334 		return;
   16335 	}
   16336 
   16337 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16338 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16339 	reg &= ~mask;
   16340 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16341 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16342 
   16343 	/* Print only in wm_attach() */
   16344 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16345 		aprint_verbose_dev(sc->sc_dev,
   16346 		    "ASPM %s disabled to workaround the errata.\n", str);
   16347 }
   16348 
   16349 /* LPLU */
   16350 
   16351 static void
   16352 wm_lplu_d0_disable(struct wm_softc *sc)
   16353 {
   16354 	struct mii_data *mii = &sc->sc_mii;
   16355 	uint32_t reg;
   16356 	uint16_t phyval;
   16357 
   16358 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16359 		device_xname(sc->sc_dev), __func__));
   16360 
   16361 	if (sc->sc_phytype == WMPHY_IFE)
   16362 		return;
   16363 
   16364 	switch (sc->sc_type) {
   16365 	case WM_T_82571:
   16366 	case WM_T_82572:
   16367 	case WM_T_82573:
   16368 	case WM_T_82575:
   16369 	case WM_T_82576:
   16370 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16371 		phyval &= ~PMR_D0_LPLU;
   16372 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16373 		break;
   16374 	case WM_T_82580:
   16375 	case WM_T_I350:
   16376 	case WM_T_I210:
   16377 	case WM_T_I211:
   16378 		reg = CSR_READ(sc, WMREG_PHPM);
   16379 		reg &= ~PHPM_D0A_LPLU;
   16380 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16381 		break;
   16382 	case WM_T_82574:
   16383 	case WM_T_82583:
   16384 	case WM_T_ICH8:
   16385 	case WM_T_ICH9:
   16386 	case WM_T_ICH10:
   16387 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16388 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16389 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16390 		CSR_WRITE_FLUSH(sc);
   16391 		break;
   16392 	case WM_T_PCH:
   16393 	case WM_T_PCH2:
   16394 	case WM_T_PCH_LPT:
   16395 	case WM_T_PCH_SPT:
   16396 	case WM_T_PCH_CNP:
   16397 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16398 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16399 		if (wm_phy_resetisblocked(sc) == false)
   16400 			phyval |= HV_OEM_BITS_ANEGNOW;
   16401 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16402 		break;
   16403 	default:
   16404 		break;
   16405 	}
   16406 }
   16407 
   16408 /* EEE */
   16409 
   16410 static int
   16411 wm_set_eee_i350(struct wm_softc *sc)
   16412 {
   16413 	struct ethercom *ec = &sc->sc_ethercom;
   16414 	uint32_t ipcnfg, eeer;
   16415 	uint32_t ipcnfg_mask
   16416 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16417 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16418 
   16419 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16420 
   16421 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16422 	eeer = CSR_READ(sc, WMREG_EEER);
   16423 
   16424 	/* Enable or disable per user setting */
   16425 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16426 		ipcnfg |= ipcnfg_mask;
   16427 		eeer |= eeer_mask;
   16428 	} else {
   16429 		ipcnfg &= ~ipcnfg_mask;
   16430 		eeer &= ~eeer_mask;
   16431 	}
   16432 
   16433 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16434 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16435 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16436 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16437 
   16438 	return 0;
   16439 }
   16440 
   16441 static int
   16442 wm_set_eee_pchlan(struct wm_softc *sc)
   16443 {
   16444 	device_t dev = sc->sc_dev;
   16445 	struct ethercom *ec = &sc->sc_ethercom;
   16446 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16447 	int rv;
   16448 
   16449 	switch (sc->sc_phytype) {
   16450 	case WMPHY_82579:
   16451 		lpa = I82579_EEE_LP_ABILITY;
   16452 		pcs_status = I82579_EEE_PCS_STATUS;
   16453 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16454 		break;
   16455 	case WMPHY_I217:
   16456 		lpa = I217_EEE_LP_ABILITY;
   16457 		pcs_status = I217_EEE_PCS_STATUS;
   16458 		adv_addr = I217_EEE_ADVERTISEMENT;
   16459 		break;
   16460 	default:
   16461 		return 0;
   16462 	}
   16463 
   16464 	rv = sc->phy.acquire(sc);
   16465 	if (rv != 0) {
   16466 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16467 		return rv;
   16468 	}
   16469 
   16470 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16471 	if (rv != 0)
   16472 		goto release;
   16473 
   16474 	/* Clear bits that enable EEE in various speeds */
   16475 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16476 
   16477 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16478 		/* Save off link partner's EEE ability */
   16479 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16480 		if (rv != 0)
   16481 			goto release;
   16482 
   16483 		/* Read EEE advertisement */
   16484 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16485 			goto release;
   16486 
   16487 		/*
   16488 		 * Enable EEE only for speeds in which the link partner is
   16489 		 * EEE capable and for which we advertise EEE.
   16490 		 */
   16491 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16492 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16493 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16494 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16495 			if ((data & ANLPAR_TX_FD) != 0)
   16496 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16497 			else {
   16498 				/*
   16499 				 * EEE is not supported in 100Half, so ignore
   16500 				 * partner's EEE in 100 ability if full-duplex
   16501 				 * is not advertised.
   16502 				 */
   16503 				sc->eee_lp_ability
   16504 				    &= ~AN_EEEADVERT_100_TX;
   16505 			}
   16506 		}
   16507 	}
   16508 
   16509 	if (sc->sc_phytype == WMPHY_82579) {
   16510 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16511 		if (rv != 0)
   16512 			goto release;
   16513 
   16514 		data &= ~I82579_LPI_PLL_SHUT_100;
   16515 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16516 	}
   16517 
   16518 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16519 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16520 		goto release;
   16521 
   16522 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16523 release:
   16524 	sc->phy.release(sc);
   16525 
   16526 	return rv;
   16527 }
   16528 
   16529 static int
   16530 wm_set_eee(struct wm_softc *sc)
   16531 {
   16532 	struct ethercom *ec = &sc->sc_ethercom;
   16533 
   16534 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16535 		return 0;
   16536 
   16537 	if (sc->sc_type == WM_T_I354) {
   16538 		/* I354 uses an external PHY */
   16539 		return 0; /* not yet */
   16540 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16541 		return wm_set_eee_i350(sc);
   16542 	else if (sc->sc_type >= WM_T_PCH2)
   16543 		return wm_set_eee_pchlan(sc);
   16544 
   16545 	return 0;
   16546 }
   16547 
   16548 /*
   16549  * Workarounds (mainly PHY related).
   16550  * Basically, PHY's workarounds are in the PHY drivers.
   16551  */
   16552 
   16553 /* Workaround for 82566 Kumeran PCS lock loss */
   16554 static int
   16555 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16556 {
   16557 	struct mii_data *mii = &sc->sc_mii;
   16558 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16559 	int i, reg, rv;
   16560 	uint16_t phyreg;
   16561 
   16562 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16563 		device_xname(sc->sc_dev), __func__));
   16564 
   16565 	/* If the link is not up, do nothing */
   16566 	if ((status & STATUS_LU) == 0)
   16567 		return 0;
   16568 
   16569 	/* Nothing to do if the link is other than 1Gbps */
   16570 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16571 		return 0;
   16572 
   16573 	for (i = 0; i < 10; i++) {
    16574 		/* Read twice */
   16575 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16576 		if (rv != 0)
   16577 			return rv;
   16578 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16579 		if (rv != 0)
   16580 			return rv;
   16581 
   16582 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16583 			goto out;	/* GOOD! */
   16584 
   16585 		/* Reset the PHY */
   16586 		wm_reset_phy(sc);
   16587 		delay(5*1000);
   16588 	}
   16589 
   16590 	/* Disable GigE link negotiation */
   16591 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16592 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16593 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16594 
   16595 	/*
   16596 	 * Call gig speed drop workaround on Gig disable before accessing
   16597 	 * any PHY registers.
   16598 	 */
   16599 	wm_gig_downshift_workaround_ich8lan(sc);
   16600 
   16601 out:
   16602 	return 0;
   16603 }
   16604 
   16605 /*
   16606  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16607  *  @sc: pointer to the HW structure
   16608  *
    16609  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16610  *  LPLU, Gig disable, MDIC PHY reset):
   16611  *    1) Set Kumeran Near-end loopback
   16612  *    2) Clear Kumeran Near-end loopback
   16613  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16614  */
   16615 static void
   16616 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16617 {
   16618 	uint16_t kmreg;
   16619 
   16620 	/* Only for igp3 */
   16621 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16622 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16623 			return;
   16624 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16625 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16626 			return;
   16627 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16628 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16629 	}
   16630 }
   16631 
   16632 /*
   16633  * Workaround for pch's PHYs
   16634  * XXX should be moved to new PHY driver?
   16635  */
   16636 static int
   16637 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16638 {
   16639 	device_t dev = sc->sc_dev;
   16640 	struct mii_data *mii = &sc->sc_mii;
   16641 	struct mii_softc *child;
   16642 	uint16_t phy_data, phyrev = 0;
   16643 	int phytype = sc->sc_phytype;
   16644 	int rv;
   16645 
   16646 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16647 		device_xname(dev), __func__));
   16648 	KASSERT(sc->sc_type == WM_T_PCH);
   16649 
   16650 	/* Set MDIO slow mode before any other MDIO access */
   16651 	if (phytype == WMPHY_82577)
   16652 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16653 			return rv;
   16654 
   16655 	child = LIST_FIRST(&mii->mii_phys);
   16656 	if (child != NULL)
   16657 		phyrev = child->mii_mpd_rev;
   16658 
    16659 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   16660 	if ((child != NULL) &&
   16661 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16662 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16663 		/* Disable generation of early preamble (0x4431) */
   16664 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16665 		    &phy_data);
   16666 		if (rv != 0)
   16667 			return rv;
   16668 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16669 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16670 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16671 		    phy_data);
   16672 		if (rv != 0)
   16673 			return rv;
   16674 
   16675 		/* Preamble tuning for SSC */
   16676 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16677 		if (rv != 0)
   16678 			return rv;
   16679 	}
   16680 
   16681 	/* 82578 */
   16682 	if (phytype == WMPHY_82578) {
   16683 		/*
   16684 		 * Return registers to default by doing a soft reset then
   16685 		 * writing 0x3140 to the control register
   16686 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16687 		 */
   16688 		if ((child != NULL) && (phyrev < 2)) {
   16689 			PHY_RESET(child);
   16690 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16691 			if (rv != 0)
   16692 				return rv;
   16693 		}
   16694 	}
   16695 
   16696 	/* Select page 0 */
   16697 	if ((rv = sc->phy.acquire(sc)) != 0)
   16698 		return rv;
   16699 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16700 	sc->phy.release(sc);
   16701 	if (rv != 0)
   16702 		return rv;
   16703 
   16704 	/*
   16705 	 * Configure the K1 Si workaround during phy reset assuming there is
   16706 	 * link so that it disables K1 if link is in 1Gbps.
   16707 	 */
   16708 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16709 		return rv;
   16710 
   16711 	/* Workaround for link disconnects on a busy hub in half duplex */
   16712 	rv = sc->phy.acquire(sc);
   16713 	if (rv)
   16714 		return rv;
   16715 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16716 	if (rv)
   16717 		goto release;
   16718 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16719 	    phy_data & 0x00ff);
   16720 	if (rv)
   16721 		goto release;
   16722 
   16723 	/* Set MSE higher to enable link to stay up when noise is high */
   16724 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16725 release:
   16726 	sc->phy.release(sc);
   16727 
   16728 	return rv;
   16729 }
   16730 
   16731 /*
   16732  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16733  *  @sc:   pointer to the HW structure
   16734  */
   16735 static void
   16736 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16737 {
   16738 
   16739 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16740 		device_xname(sc->sc_dev), __func__));
   16741 
   16742 	if (sc->phy.acquire(sc) != 0)
   16743 		return;
   16744 
   16745 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16746 
   16747 	sc->phy.release(sc);
   16748 }
   16749 
   16750 static void
   16751 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16752 {
   16753 	device_t dev = sc->sc_dev;
   16754 	uint32_t mac_reg;
   16755 	uint16_t i, wuce;
   16756 	int count;
   16757 
   16758 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16759 		device_xname(dev), __func__));
   16760 
   16761 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16762 		return;
   16763 
   16764 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16765 	count = wm_rar_count(sc);
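	/*
	 * Each receive address entry is spread over four 16-bit PHY
	 * wakeup registers, as the loop below shows:
	 *
	 *	BM_RAR_L(i)    <- RAL bits 15:0
	 *	BM_RAR_M(i)    <- RAL bits 31:16
	 *	BM_RAR_H(i)    <- RAH bits 15:0
	 *	BM_RAR_CTRL(i) <- RAH's Address Valid (RAL_AV) bit
	 */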
   16766 	for (i = 0; i < count; i++) {
   16767 		uint16_t lo, hi;
   16768 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16769 		lo = (uint16_t)(mac_reg & 0xffff);
   16770 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16771 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16772 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16773 
   16774 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16775 		lo = (uint16_t)(mac_reg & 0xffff);
   16776 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16777 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16778 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16779 	}
   16780 
   16781 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16782 }
   16783 
   16784 /*
   16785  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16786  *  with 82579 PHY
   16787  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16788  */
   16789 static int
   16790 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16791 {
   16792 	device_t dev = sc->sc_dev;
   16793 	int rar_count;
   16794 	int rv;
   16795 	uint32_t mac_reg;
   16796 	uint16_t dft_ctrl, data;
   16797 	uint16_t i;
   16798 
   16799 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16800 		device_xname(dev), __func__));
   16801 
   16802 	if (sc->sc_type < WM_T_PCH2)
   16803 		return 0;
   16804 
   16805 	/* Acquire PHY semaphore */
   16806 	rv = sc->phy.acquire(sc);
   16807 	if (rv != 0)
   16808 		return rv;
   16809 
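	/*
	 * Everything below runs with the PHY Rx path disabled
	 * (I82579_DFT_CTRL bit 14), presumably so that neither the MAC
	 * nor the PHY side of the workaround sees traffic while only
	 * half-configured; the path is re-enabled at the end.
	 */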
   16810 	/* Disable Rx path while enabling/disabling workaround */
   16811 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16812 	if (rv != 0)
   16813 		goto out;
   16814 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16815 	    dft_ctrl | (1 << 14));
   16816 	if (rv != 0)
   16817 		goto out;
   16818 
   16819 	if (enable) {
   16820 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16821 		 * SHRAL/H) and initial CRC values to the MAC
   16822 		 */
   16823 		rar_count = wm_rar_count(sc);
   16824 		for (i = 0; i < rar_count; i++) {
   16825 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16826 			uint32_t addr_high, addr_low;
   16827 
   16828 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16829 			if (!(addr_high & RAL_AV))
   16830 				continue;
   16831 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16832 			mac_addr[0] = (addr_low & 0xFF);
   16833 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16834 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16835 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16836 			mac_addr[4] = (addr_high & 0xFF);
   16837 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16838 
   16839 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16840 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16841 		}
   16842 
   16843 		/* Write Rx addresses to the PHY */
   16844 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16845 	}
   16846 
   16847 	/*
   16848 	 * If enable ==
   16849 	 *	true: Enable jumbo frame workaround in the MAC.
   16850 	 *	false: Write MAC register values back to h/w defaults.
   16851 	 */
   16852 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16853 	if (enable) {
   16854 		mac_reg &= ~(1 << 14);
   16855 		mac_reg |= (7 << 15);
   16856 	} else
   16857 		mac_reg &= ~(0xf << 14);
   16858 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16859 
   16860 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16861 	if (enable) {
   16862 		mac_reg |= RCTL_SECRC;
   16863 		sc->sc_rctl |= RCTL_SECRC;
   16864 		sc->sc_flags |= WM_F_CRC_STRIP;
   16865 	} else {
   16866 		mac_reg &= ~RCTL_SECRC;
   16867 		sc->sc_rctl &= ~RCTL_SECRC;
   16868 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16869 	}
   16870 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16871 
   16872 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16873 	if (rv != 0)
   16874 		goto out;
   16875 	if (enable)
   16876 		data |= 1 << 0;
   16877 	else
   16878 		data &= ~(1 << 0);
   16879 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16880 	if (rv != 0)
   16881 		goto out;
   16882 
   16883 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16884 	if (rv != 0)
   16885 		goto out;
   16886 	/*
    16887 	 * XXX FreeBSD and Linux do the same thing here: they set the same
    16888 	 * value in both the enable case and the disable case. Is that correct?
   16889 	 */
   16890 	data &= ~(0xf << 8);
   16891 	data |= (0xb << 8);
   16892 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16893 	if (rv != 0)
   16894 		goto out;
   16895 
   16896 	/*
   16897 	 * If enable ==
   16898 	 *	true: Enable jumbo frame workaround in the PHY.
   16899 	 *	false: Write PHY register values back to h/w defaults.
   16900 	 */
   16901 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16902 	if (rv != 0)
   16903 		goto out;
   16904 	data &= ~(0x7F << 5);
   16905 	if (enable)
   16906 		data |= (0x37 << 5);
   16907 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16908 	if (rv != 0)
   16909 		goto out;
   16910 
   16911 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16912 	if (rv != 0)
   16913 		goto out;
   16914 	if (enable)
   16915 		data &= ~(1 << 13);
   16916 	else
   16917 		data |= (1 << 13);
   16918 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16919 	if (rv != 0)
   16920 		goto out;
   16921 
   16922 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16923 	if (rv != 0)
   16924 		goto out;
   16925 	data &= ~(0x3FF << 2);
   16926 	if (enable)
   16927 		data |= (I82579_TX_PTR_GAP << 2);
   16928 	else
   16929 		data |= (0x8 << 2);
   16930 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16931 	if (rv != 0)
   16932 		goto out;
   16933 
   16934 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16935 	    enable ? 0xf100 : 0x7e00);
   16936 	if (rv != 0)
   16937 		goto out;
   16938 
   16939 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16940 	if (rv != 0)
   16941 		goto out;
   16942 	if (enable)
   16943 		data |= 1 << 10;
   16944 	else
   16945 		data &= ~(1 << 10);
   16946 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16947 	if (rv != 0)
   16948 		goto out;
   16949 
   16950 	/* Re-enable Rx path after enabling/disabling workaround */
   16951 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16952 	    dft_ctrl & ~(1 << 14));
   16953 
   16954 out:
   16955 	sc->phy.release(sc);
   16956 
   16957 	return rv;
   16958 }
   16959 
   16960 /*
   16961  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16962  *  done after every PHY reset.
   16963  */
   16964 static int
   16965 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16966 {
   16967 	device_t dev = sc->sc_dev;
   16968 	int rv;
   16969 
   16970 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16971 		device_xname(dev), __func__));
   16972 	KASSERT(sc->sc_type == WM_T_PCH2);
   16973 
   16974 	/* Set MDIO slow mode before any other MDIO access */
   16975 	rv = wm_set_mdio_slow_mode_hv(sc);
   16976 	if (rv != 0)
   16977 		return rv;
   16978 
   16979 	rv = sc->phy.acquire(sc);
   16980 	if (rv != 0)
   16981 		return rv;
   16982 	/* Set MSE higher to enable link to stay up when noise is high */
   16983 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16984 	if (rv != 0)
   16985 		goto release;
   16986 	/* Drop link after 5 times MSE threshold was reached */
   16987 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16988 release:
   16989 	sc->phy.release(sc);
   16990 
   16991 	return rv;
   16992 }
   16993 
   16994 /**
   16995  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16996  *  @link: link up bool flag
   16997  *
   16998  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16999  *  preventing further DMA write requests.  Workaround the issue by disabling
    17000  *  the de-assertion of the clock request when in 1Gbps mode.
   17001  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17002  *  speeds in order to avoid Tx hangs.
   17003  **/
   17004 static int
   17005 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17006 {
   17007 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17008 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17009 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17010 	uint16_t phyreg;
   17011 
   17012 	if (link && (speed == STATUS_SPEED_1000)) {
   17013 		int rv;
   17014 
   17015 		rv = sc->phy.acquire(sc);
   17016 		if (rv != 0)
   17017 			return rv;
   17018 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17019 		    &phyreg);
   17020 		if (rv != 0)
   17021 			goto release;
   17022 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17023 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17024 		if (rv != 0)
   17025 			goto release;
   17026 		delay(20);
   17027 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17028 
    17029 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    17030 		    phyreg);
   17031 release:
   17032 		sc->phy.release(sc);
   17033 		return rv;
   17034 	}
   17035 
   17036 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17037 
   17038 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17039 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17040 	    || !link
   17041 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17042 		goto update_fextnvm6;
   17043 
   17044 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17045 
   17046 	/* Clear link status transmit timeout */
   17047 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17048 	if (speed == STATUS_SPEED_100) {
   17049 		/* Set inband Tx timeout to 5x10us for 100Half */
   17050 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17051 
   17052 		/* Do not extend the K1 entry latency for 100Half */
   17053 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17054 	} else {
   17055 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17056 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17057 
   17058 		/* Extend the K1 entry latency for 10 Mbps */
   17059 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17060 	}
   17061 
   17062 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17063 
   17064 update_fextnvm6:
   17065 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17066 	return 0;
   17067 }
   17068 
   17069 /*
   17070  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17071  *  @sc:   pointer to the HW structure
   17072  *  @link: link up bool flag
   17073  *
   17074  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   17075  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   17076  *  If link is down, the function will restore the default K1 setting located
   17077  *  in the NVM.
   17078  */
   17079 static int
   17080 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17081 {
   17082 	int k1_enable = sc->sc_nvm_k1_enabled;
   17083 	int rv;
   17084 
   17085 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17086 		device_xname(sc->sc_dev), __func__));
   17087 
   17088 	rv = sc->phy.acquire(sc);
   17089 	if (rv != 0)
   17090 		return rv;
   17091 
   17092 	if (link) {
   17093 		k1_enable = 0;
   17094 
   17095 		/* Link stall fix for link up */
   17096 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17097 		    0x0100);
   17098 	} else {
   17099 		/* Link stall fix for link down */
   17100 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17101 		    0x4100);
   17102 	}
   17103 
   17104 	wm_configure_k1_ich8lan(sc, k1_enable);
   17105 	sc->phy.release(sc);
   17106 
   17107 	return 0;
   17108 }
   17109 
   17110 /*
   17111  *  wm_k1_workaround_lv - K1 Si workaround
   17112  *  @sc:   pointer to the HW structure
   17113  *
   17114  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17115  *  Disable K1 for 1000 and 100 speeds
   17116  */
   17117 static int
   17118 wm_k1_workaround_lv(struct wm_softc *sc)
   17119 {
   17120 	uint32_t reg;
   17121 	uint16_t phyreg;
   17122 	int rv;
   17123 
   17124 	if (sc->sc_type != WM_T_PCH2)
   17125 		return 0;
   17126 
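	/*
	 * Two cases, both taken only when link is up and
	 * autonegotiation has completed: at 1000/100 Mb/s disable K1
	 * entirely (packet drop issue), at 10 Mb/s keep K1 but stretch
	 * the beacon duration to 16us.
	 */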
   17127 	/* Set K1 beacon duration based on 10Mbps speed */
   17128 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17129 	if (rv != 0)
   17130 		return rv;
   17131 
   17132 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17133 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17134 		if (phyreg &
   17135 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17136 			/* LV 1G/100 packet drop issue workaround */
   17137 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17138 			    &phyreg);
   17139 			if (rv != 0)
   17140 				return rv;
   17141 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17142 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17143 			    phyreg);
   17144 			if (rv != 0)
   17145 				return rv;
   17146 		} else {
   17147 			/* For 10Mbps */
   17148 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17149 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17150 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17151 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17152 		}
   17153 	}
   17154 
   17155 	return 0;
   17156 }
   17157 
   17158 /*
   17159  *  wm_link_stall_workaround_hv - Si workaround
   17160  *  @sc: pointer to the HW structure
   17161  *
   17162  *  This function works around a Si bug where the link partner can get
   17163  *  a link up indication before the PHY does. If small packets are sent
   17164  *  by the link partner they can be placed in the packet buffer without
    17165  *  being properly accounted for by the PHY and will stall, preventing
   17166  *  further packets from being received.  The workaround is to clear the
   17167  *  packet buffer after the PHY detects link up.
   17168  */
   17169 static int
   17170 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17171 {
   17172 	uint16_t phyreg;
   17173 
   17174 	if (sc->sc_phytype != WMPHY_82578)
   17175 		return 0;
   17176 
    17177 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17178 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17179 	if ((phyreg & BMCR_LOOP) != 0)
   17180 		return 0;
   17181 
   17182 	/* Check if link is up and at 1Gbps */
   17183 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17184 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17185 	    | BM_CS_STATUS_SPEED_MASK;
   17186 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17187 		| BM_CS_STATUS_SPEED_1000))
   17188 		return 0;
   17189 
   17190 	delay(200 * 1000);	/* XXX too big */
   17191 
   17192 	/* Flush the packets in the fifo buffer */
   17193 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17194 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17195 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17196 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17197 
   17198 	return 0;
   17199 }
   17200 
   17201 static int
   17202 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17203 {
   17204 	int rv;
   17205 
   17206 	rv = sc->phy.acquire(sc);
   17207 	if (rv != 0) {
   17208 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17209 		    __func__);
   17210 		return rv;
   17211 	}
   17212 
   17213 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17214 
   17215 	sc->phy.release(sc);
   17216 
   17217 	return rv;
   17218 }
   17219 
   17220 static int
   17221 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17222 {
   17223 	int rv;
   17224 	uint16_t reg;
   17225 
   17226 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17227 	if (rv != 0)
   17228 		return rv;
   17229 
   17230 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17231 	    reg | HV_KMRN_MDIO_SLOW);
   17232 }
   17233 
   17234 /*
   17235  *  wm_configure_k1_ich8lan - Configure K1 power state
   17236  *  @sc: pointer to the HW structure
   17237  *  @enable: K1 state to configure
   17238  *
   17239  *  Configure the K1 power state based on the provided parameter.
   17240  *  Assumes semaphore already acquired.
   17241  */
   17242 static void
   17243 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17244 {
   17245 	uint32_t ctrl, ctrl_ext, tmp;
   17246 	uint16_t kmreg;
   17247 	int rv;
   17248 
   17249 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17250 
   17251 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17252 	if (rv != 0)
   17253 		return;
   17254 
   17255 	if (k1_enable)
   17256 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17257 	else
   17258 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17259 
   17260 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17261 	if (rv != 0)
   17262 		return;
   17263 
   17264 	delay(20);
   17265 
   17266 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17267 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17268 
   17269 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17270 	tmp |= CTRL_FRCSPD;
   17271 
   17272 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17273 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17274 	CSR_WRITE_FLUSH(sc);
   17275 	delay(20);
   17276 
   17277 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17278 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17279 	CSR_WRITE_FLUSH(sc);
   17280 	delay(20);
   17281 
   17282 	return;
   17283 }
   17284 
   17285 /* special case - for 82575 - need to do manual init ... */
   17286 static void
   17287 wm_reset_init_script_82575(struct wm_softc *sc)
   17288 {
   17289 	/*
   17290 	 * Remark: this is untested code - we have no board without EEPROM
    17291 	 *  same setup as mentioned in the FreeBSD driver for the i82575
   17292 	 */
   17293 
   17294 	/* SerDes configuration via SERDESCTRL */
   17295 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17296 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17298 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17299 
   17300 	/* CCM configuration via CCMCTL register */
   17301 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17302 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17303 
   17304 	/* PCIe lanes configuration */
   17305 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17306 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17307 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17308 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17309 
   17310 	/* PCIe PLL Configuration */
   17311 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17312 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17313 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17314 }
   17315 
   17316 static void
   17317 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17318 {
   17319 	uint32_t reg;
   17320 	uint16_t nvmword;
   17321 	int rv;
   17322 
   17323 	if (sc->sc_type != WM_T_82580)
   17324 		return;
   17325 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17326 		return;
   17327 
   17328 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17329 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17330 	if (rv != 0) {
   17331 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17332 		    __func__);
   17333 		return;
   17334 	}
   17335 
   17336 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17337 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17338 		reg |= MDICNFG_DEST;
   17339 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17340 		reg |= MDICNFG_COM_MDIO;
   17341 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17342 }
   17343 
   17344 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17345 
   17346 static bool
   17347 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17348 {
   17349 	uint32_t reg;
   17350 	uint16_t id1, id2;
   17351 	int i, rv;
   17352 
   17353 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17354 		device_xname(sc->sc_dev), __func__));
   17355 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17356 
   17357 	id1 = id2 = 0xffff;
   17358 	for (i = 0; i < 2; i++) {
   17359 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17360 		    &id1);
   17361 		if ((rv != 0) || MII_INVALIDID(id1))
   17362 			continue;
   17363 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17364 		    &id2);
   17365 		if ((rv != 0) || MII_INVALIDID(id2))
   17366 			continue;
   17367 		break;
   17368 	}
   17369 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17370 		goto out;
   17371 
   17372 	/*
   17373 	 * In case the PHY needs to be in mdio slow mode,
   17374 	 * set slow mode and try to get the PHY id again.
   17375 	 */
   17376 	rv = 0;
   17377 	if (sc->sc_type < WM_T_PCH_LPT) {
   17378 		wm_set_mdio_slow_mode_hv_locked(sc);
   17379 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17380 		    &id1);
   17381 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17382 		    &id2);
   17383 	}
   17384 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17385 		device_printf(sc->sc_dev, "XXX return with false\n");
   17386 		return false;
   17387 	}
   17388 out:
   17389 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17390 		/* Only unforce SMBus if ME is not active */
   17391 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17392 			uint16_t phyreg;
   17393 
   17394 			/* Unforce SMBus mode in PHY */
   17395 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17396 			    CV_SMB_CTRL, &phyreg);
   17397 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17398 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17399 			    CV_SMB_CTRL, phyreg);
   17400 
   17401 			/* Unforce SMBus mode in MAC */
   17402 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17403 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17404 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17405 		}
   17406 	}
   17407 	return true;
   17408 }
   17409 
   17410 static void
   17411 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17412 {
   17413 	uint32_t reg;
   17414 	int i;
   17415 
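	/*
	 * Toggling LANPHYPC power-cycles the PHY (see the Sx->S0
	 * workaround comment above), so widen the PHY config counter
	 * first to give the PHY time to reload its configuration after
	 * the cycle.
	 */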
   17416 	/* Set PHY Config Counter to 50msec */
   17417 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17418 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17419 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17420 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17421 
   17422 	/* Toggle LANPHYPC */
   17423 	reg = CSR_READ(sc, WMREG_CTRL);
   17424 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17425 	reg &= ~CTRL_LANPHYPC_VALUE;
   17426 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17427 	CSR_WRITE_FLUSH(sc);
   17428 	delay(1000);
   17429 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17430 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17431 	CSR_WRITE_FLUSH(sc);
   17432 
   17433 	if (sc->sc_type < WM_T_PCH_LPT)
   17434 		delay(50 * 1000);
   17435 	else {
   17436 		i = 20;
   17437 
   17438 		do {
   17439 			delay(5 * 1000);
   17440 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17441 		    && i--);
   17442 
   17443 		delay(30 * 1000);
   17444 	}
   17445 }
   17446 
   17447 static int
   17448 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17449 {
   17450 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17451 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17452 	uint32_t rxa;
   17453 	uint16_t scale = 0, lat_enc = 0;
   17454 	int32_t obff_hwm = 0;
   17455 	int64_t lat_ns, value;
   17456 
   17457 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17458 		device_xname(sc->sc_dev), __func__));
   17459 
   17460 	if (link) {
   17461 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17462 		uint32_t status;
   17463 		uint16_t speed;
   17464 		pcireg_t preg;
   17465 
   17466 		status = CSR_READ(sc, WMREG_STATUS);
   17467 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17468 		case STATUS_SPEED_10:
   17469 			speed = 10;
   17470 			break;
   17471 		case STATUS_SPEED_100:
   17472 			speed = 100;
   17473 			break;
   17474 		case STATUS_SPEED_1000:
   17475 			speed = 1000;
   17476 			break;
   17477 		default:
   17478 			device_printf(sc->sc_dev, "Unknown speed "
   17479 			    "(status = %08x)\n", status);
   17480 			return -1;
   17481 		}
   17482 
   17483 		/* Rx Packet Buffer Allocation size (KB) */
   17484 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17485 
   17486 		/*
   17487 		 * Determine the maximum latency tolerated by the device.
   17488 		 *
   17489 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17490 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17491 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17492 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17493 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17494 		 */
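		/*
		 * Worked example (numbers illustrative only): for
		 * lat_ns = 300,000 the loop below computes
		 *	300000 -> howmany(300000, 32) = 9375 (scale 1)
		 *	9375   -> howmany(9375, 32)   = 293  (scale 2)
		 * so lat_enc = (2 << 10) | 293, which encodes
		 * 293 * 2^10 ns =~ 300us.
		 */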
   17495 		lat_ns = ((int64_t)rxa * 1024 -
   17496 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17497 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17498 		if (lat_ns < 0)
   17499 			lat_ns = 0;
   17500 		else
   17501 			lat_ns /= speed;
   17502 		value = lat_ns;
   17503 
   17504 		while (value > LTRV_VALUE) {
    17505 			scale++;
   17506 			value = howmany(value, __BIT(5));
   17507 		}
   17508 		if (scale > LTRV_SCALE_MAX) {
   17509 			device_printf(sc->sc_dev,
   17510 			    "Invalid LTR latency scale %d\n", scale);
   17511 			return -1;
   17512 		}
   17513 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17514 
   17515 		/* Determine the maximum latency tolerated by the platform */
   17516 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17517 		    WM_PCI_LTR_CAP_LPT);
   17518 		max_snoop = preg & 0xffff;
   17519 		max_nosnoop = preg >> 16;
   17520 
   17521 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17522 
   17523 		if (lat_enc > max_ltr_enc) {
   17524 			lat_enc = max_ltr_enc;
   17525 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17526 			    * PCI_LTR_SCALETONS(
   17527 				    __SHIFTOUT(lat_enc,
   17528 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17529 		}
   17530 
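		/*
		 * Convert the tolerated latency back into the amount of
		 * Rx buffer (in KB) that drains in that time; the OBFF
		 * high water mark is whatever is left of rxa.  Worked
		 * example (illustrative): at 1000 Mb/s and lat_ns =
		 * 300,032, 300032 * 1000 * 1000 / 8 / 10^9 =~ 37 KB.
		 */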
   17531 		if (lat_ns) {
   17532 			lat_ns *= speed * 1000;
   17533 			lat_ns /= 8;
   17534 			lat_ns /= 1000000000;
   17535 			obff_hwm = (int32_t)(rxa - lat_ns);
   17536 		}
   17537 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    17538 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17539 			    "(rxa = %d, lat_ns = %d)\n",
   17540 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17541 			return -1;
   17542 		}
   17543 	}
   17544 	/* Snoop and No-Snoop latencies the same */
   17545 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17546 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17547 
   17548 	/* Set OBFF high water mark */
   17549 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17550 	reg |= obff_hwm;
   17551 	CSR_WRITE(sc, WMREG_SVT, reg);
   17552 
   17553 	/* Enable OBFF */
   17554 	reg = CSR_READ(sc, WMREG_SVCR);
   17555 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17556 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17557 
   17558 	return 0;
   17559 }
   17560 
   17561 /*
   17562  * I210 Errata 25 and I211 Errata 10
   17563  * Slow System Clock.
   17564  *
    17565  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   17566  */
   17567 static int
   17568 wm_pll_workaround_i210(struct wm_softc *sc)
   17569 {
   17570 	uint32_t mdicnfg, wuc;
   17571 	uint32_t reg;
   17572 	pcireg_t pcireg;
   17573 	uint32_t pmreg;
   17574 	uint16_t nvmword, tmp_nvmword;
   17575 	uint16_t phyval;
   17576 	bool wa_done = false;
   17577 	int i, rv = 0;
   17578 
   17579 	/* Get Power Management cap offset */
   17580 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17581 	    &pmreg, NULL) == 0)
   17582 		return -1;
   17583 
   17584 	/* Save WUC and MDICNFG registers */
   17585 	wuc = CSR_READ(sc, WMREG_WUC);
   17586 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17587 
   17588 	reg = mdicnfg & ~MDICNFG_DEST;
   17589 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17590 
   17591 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17592 		/*
   17593 		 * The default value of the Initialization Control Word 1
   17594 		 * is the same for the I210's FLASH_HW and the I21[01]'s iNVM.
   17595 		 */
   17596 		nvmword = INVM_DEFAULT_AL;
   17597 	}
   17598 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17599 
   17600 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17601 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17602 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17603 
   17604 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17605 			rv = 0;
   17606 			break; /* OK */
   17607 		} else
   17608 			rv = -1;
   17609 
   17610 		wa_done = true;
   17611 		/* Directly reset the internal PHY */
   17612 		reg = CSR_READ(sc, WMREG_CTRL);
   17613 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17614 
   17615 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17616 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17617 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17618 
   17619 		CSR_WRITE(sc, WMREG_WUC, 0);
   17620 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17621 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17622 
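         		/*
         		 * Annotation: cycling the function through D3hot and
         		 * back to D0 below forces an NVM auto-load, so the PLL
         		 * override word written above takes effect; the original
         		 * word is restored once the device is back in D0.
         		 */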
   17623 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17624 		    pmreg + PCI_PMCSR);
   17625 		pcireg |= PCI_PMCSR_STATE_D3;
   17626 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17627 		    pmreg + PCI_PMCSR, pcireg);
   17628 		delay(1000);
   17629 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17630 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17631 		    pmreg + PCI_PMCSR, pcireg);
   17632 
   17633 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17634 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17635 
   17636 		/* Restore WUC register */
   17637 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17638 	}
   17639 
   17640 	/* Restore MDICNFG setting */
   17641 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17642 	if (wa_done)
   17643 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17644 	return rv;
   17645 }
   17646 
   17647 static void
   17648 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17649 {
   17650 	uint32_t reg;
   17651 
   17652 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17653 		device_xname(sc->sc_dev), __func__));
   17654 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17655 	    || (sc->sc_type == WM_T_PCH_CNP));
   17656 
   17657 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17658 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17659 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17660 
   17661 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17662 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17663 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17664 }
   17665 
   17666 /* Sysctl functions */
   17667 static int
   17668 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17669 {
   17670 	struct sysctlnode node = *rnode;
   17671 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17672 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17673 	struct wm_softc *sc = txq->txq_sc;
   17674 	uint32_t reg;
   17675 
   17676 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17677 	node.sysctl_data = &reg;
   17678 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17679 }
   17680 
   17681 static int
   17682 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17683 {
   17684 	struct sysctlnode node = *rnode;
   17685 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17686 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17687 	struct wm_softc *sc = txq->txq_sc;
   17688 	uint32_t reg;
   17689 
   17690 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17691 	node.sysctl_data = &reg;
   17692 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17693 }
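
         /*
          * Annotation (illustrative sketch, not part of the driver): a
          * read-only sysctl node backed by a handler like the two above is
          * typically created with sysctl_createv(9).  The function name, the
          * sysctl log member and the node name here are assumptions for the
          * example.
          */
         static void
         wm_sysctl_example_attach(struct wm_softc *sc, struct wm_txqueue *txq,
             const struct sysctlnode *rnode)
         {
         	const struct sysctlnode *cnode;
         
         	/* Create a read-only int node evaluated by the handler */
         	sysctl_createv(&sc->sc_sysctllog, 0, &rnode, &cnode,
         	    CTLFLAG_READONLY, CTLTYPE_INT, "txq_tdh",
         	    SYSCTL_DESCR("TX descriptor head"),
         	    wm_sysctl_tdh_handler, 0, (void *)txq, 0,
         	    CTL_CREATE, CTL_EOL);
         }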
   17694 
   17695 #ifdef WM_DEBUG
   17696 static int
   17697 wm_sysctl_debug(SYSCTLFN_ARGS)
   17698 {
   17699 	struct sysctlnode node = *rnode;
   17700 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17701 	uint32_t dflags;
   17702 	int error;
   17703 
   17704 	dflags = sc->sc_debug;
   17705 	node.sysctl_data = &dflags;
   17706 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17707 
   17708 	if (error || newp == NULL)
   17709 		return error;
   17710 
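         	/*
         	 * Annotation: sysctl_lookup() copies the old value out and, on
         	 * a write, copies the new value into dflags; newp == NULL means
         	 * this was a read, so only a write reaches this point.
         	 */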
   17711 	sc->sc_debug = dflags;
   17712 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17713 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17714 
   17715 	return 0;
   17716 }
   17717 #endif
   17718