      1 /*	$NetBSD: if_wm.c,v 1.763 2022/08/12 10:59:42 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.763 2022/08/12 10:59:42 riastradh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_if_wm.h"
     89 #endif
     90 
     91 #include <sys/param.h>
     92 
     93 #include <sys/atomic.h>
     94 #include <sys/callout.h>
     95 #include <sys/cpu.h>
     96 #include <sys/device.h>
     97 #include <sys/errno.h>
     98 #include <sys/interrupt.h>
     99 #include <sys/ioctl.h>
    100 #include <sys/kernel.h>
    101 #include <sys/kmem.h>
    102 #include <sys/mbuf.h>
    103 #include <sys/pcq.h>
    104 #include <sys/queue.h>
    105 #include <sys/rndsource.h>
    106 #include <sys/socket.h>
    107 #include <sys/sysctl.h>
    108 #include <sys/syslog.h>
    109 #include <sys/systm.h>
    110 #include <sys/workqueue.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/mdio.h>
    133 #include <dev/mii/miivar.h>
    134 #include <dev/mii/miidevs.h>
    135 #include <dev/mii/mii_bitbang.h>
    136 #include <dev/mii/ikphyreg.h>
    137 #include <dev/mii/igphyreg.h>
    138 #include <dev/mii/igphyvar.h>
    139 #include <dev/mii/inbmphyreg.h>
    140 #include <dev/mii/ihphyreg.h>
    141 #include <dev/mii/makphyreg.h>
    142 
    143 #include <dev/pci/pcireg.h>
    144 #include <dev/pci/pcivar.h>
    145 #include <dev/pci/pcidevs.h>
    146 
    147 #include <dev/pci/if_wmreg.h>
    148 #include <dev/pci/if_wmvar.h>
    149 
    150 #ifdef WM_DEBUG
    151 #define	WM_DEBUG_LINK		__BIT(0)
    152 #define	WM_DEBUG_TX		__BIT(1)
    153 #define	WM_DEBUG_RX		__BIT(2)
    154 #define	WM_DEBUG_GMII		__BIT(3)
    155 #define	WM_DEBUG_MANAGE		__BIT(4)
    156 #define	WM_DEBUG_NVM		__BIT(5)
    157 #define	WM_DEBUG_INIT		__BIT(6)
    158 #define	WM_DEBUG_LOCK		__BIT(7)
    159 
    160 #if 0
    161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    163 	WM_DEBUG_LOCK
    164 #endif
    165 
    166 #define	DPRINTF(sc, x, y)			  \
    167 	do {					  \
    168 		if ((sc)->sc_debug & (x))	  \
    169 			printf y;		  \
    170 	} while (0)
    171 #else
    172 #define	DPRINTF(sc, x, y)	__nothing
    173 #endif /* WM_DEBUG */
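
        /*
         * Illustrative usage sketch (not compiled): DPRINTF()'s third
         * argument must be a fully parenthesized printf() argument list,
         * because the macro pastes it directly after "printf".
         */
        #if 0
        	DPRINTF(sc, WM_DEBUG_LINK,
        	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
        #endif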
    174 
    175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    176 
     177 /*
     178  * The maximum number of interrupts this device driver supports.
     179  */
    180 #define WM_MAX_NQUEUEINTR	16
    181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    182 
    183 #ifndef WM_DISABLE_MSI
    184 #define	WM_DISABLE_MSI 0
    185 #endif
    186 #ifndef WM_DISABLE_MSIX
    187 #define	WM_DISABLE_MSIX 0
    188 #endif
    189 
    190 int wm_disable_msi = WM_DISABLE_MSI;
    191 int wm_disable_msix = WM_DISABLE_MSIX;
    192 
    193 #ifndef WM_WATCHDOG_TIMEOUT
    194 #define WM_WATCHDOG_TIMEOUT 5
    195 #endif
    196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    197 
    198 /*
    199  * Transmit descriptor list size.  Due to errata, we can only have
    200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    201  * on >= 82544. We tell the upper layers that they can queue a lot
    202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    203  * of them at a time.
    204  *
     205  * We allow up to 64 DMA segments per packet.  Pathological packet
     206  * chains containing many small mbufs have been observed in zero-copy
     207  * situations with jumbo frames.  If an mbuf chain has more than 64
     208  * DMA segments, m_defrag() is called to reduce it.
    209  */
    210 #define	WM_NTXSEGS		64
    211 #define	WM_IFQUEUELEN		256
    212 #define	WM_TXQUEUELEN_MAX	64
    213 #define	WM_TXQUEUELEN_MAX_82547	16
    214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    217 #define	WM_NTXDESC_82542	256
    218 #define	WM_NTXDESC_82544	4096
    219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
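
        /*
         * Illustrative sketch (not compiled): because WM_NTXDESC(txq) is a
         * power of two, WM_NEXTTX() advances a ring index with a cheap AND
         * mask instead of a modulo.  A Tx fill loop walks one descriptor per
         * DMA segment like this ("dmamap" and "seg" are illustrative local
         * names):
         */
        #if 0
        	for (nexttx = txq->txq_next, seg = 0; seg < dmamap->dm_nsegs;
        	     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
        		/* Load txq->txq_descs[nexttx] from dmamap->dm_segs[seg]. */
        	}
        #endif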
    224 
    225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    226 
    227 #define	WM_TXINTERQSIZE		256
    228 
    229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    231 #endif
    232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    234 #endif
    235 
    236 /*
     237  * Receive descriptor list size.  We have one Rx buffer for
     238  * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a
     239  * full-sized packet.  We allocate 256 receive descriptors, each with
     240  * a 2k buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    241  */
    242 #define	WM_NRXDESC		256U
    243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    246 
    247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    249 #endif
    250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    252 #endif
    253 
    254 typedef union txdescs {
    255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    257 } txdescs_t;
    258 
    259 typedef union rxdescs {
    260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    263 } rxdescs_t;
    264 
    265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    267 
    268 /*
    269  * Software state for transmit jobs.
    270  */
    271 struct wm_txsoft {
    272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    274 	int txs_firstdesc;		/* first descriptor in packet */
    275 	int txs_lastdesc;		/* last descriptor in packet */
    276 	int txs_ndesc;			/* # of descriptors used */
    277 };
    278 
    279 /*
    280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    282  * them together.
    283  */
    284 struct wm_rxsoft {
    285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    287 };
    288 
    289 #define WM_LINKUP_TIMEOUT	50
    290 
    291 static uint16_t swfwphysem[] = {
    292 	SWFW_PHY0_SM,
    293 	SWFW_PHY1_SM,
    294 	SWFW_PHY2_SM,
    295 	SWFW_PHY3_SM
    296 };
    297 
    298 static const uint32_t wm_82580_rxpbs_table[] = {
    299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    300 };
    301 
    302 struct wm_softc;
    303 
    304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    305 #if !defined(WM_EVENT_COUNTERS)
    306 #define WM_EVENT_COUNTERS 1
    307 #endif
    308 #endif
    309 
    310 #ifdef WM_EVENT_COUNTERS
    311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
    312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    313 	struct evcnt qname##_ev_##evname
    314 
    315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    316 	do {								\
    317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    319 		    "%s%02d%s", #qname, (qnum), #evname);		\
    320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    321 		    (evtype), NULL, (xname),				\
    322 		    (q)->qname##_##evname##_evcnt_name);		\
    323 	} while (0)
    324 
    325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    327 
    328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    330 
    331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    332 	evcnt_detach(&(q)->qname##_ev_##evname)
    333 #endif /* WM_EVENT_COUNTERS */
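
        /*
         * Illustrative sketch (not compiled): for a counter declared with
         * WM_Q_EVCNT_DEFINE(txq, defrag) in struct wm_txqueue below, the
         * first macro argument selects the txq_* member names and the third
         * is the pointer to the structure holding them ("wmq" is an
         * illustrative struct wm_queue pointer):
         */
        #if 0
        	WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, wmq->wmq_id,
        	    device_xname(sc->sc_dev));
        #endif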
    334 
    335 struct wm_txqueue {
    336 	kmutex_t *txq_lock;		/* lock for tx operations */
    337 
    338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    339 
    340 	/* Software state for the transmit descriptors. */
    341 	int txq_num;			/* must be a power of two */
    342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    343 
    344 	/* TX control data structures. */
    345 	int txq_ndesc;			/* must be a power of two */
     346 	size_t txq_descsize;		/* size of a Tx descriptor */
    347 	txdescs_t *txq_descs_u;
    348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     350 	int txq_desc_rseg;		/* real number of control segments */
    351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    352 #define	txq_descs	txq_descs_u->sctxu_txdescs
    353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    354 
    355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    356 
    357 	int txq_free;			/* number of free Tx descriptors */
    358 	int txq_next;			/* next ready Tx descriptor */
    359 
    360 	int txq_sfree;			/* number of free Tx jobs */
    361 	int txq_snext;			/* next free Tx job */
    362 	int txq_sdirty;			/* dirty Tx jobs */
    363 
    364 	/* These 4 variables are used only on the 82547. */
    365 	int txq_fifo_size;		/* Tx FIFO size */
    366 	int txq_fifo_head;		/* current head of FIFO */
    367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    369 
     370 	/*
     371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     372 	 * CPUs. This intermediate queue mediates between them without blocking.
     373 	 */
    374 	pcq_t *txq_interq;
    375 
     376 	/*
     377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     378 	 * to manage the Tx H/W queue's busy flag.
     379 	 */
    380 	int txq_flags;			/* flags for H/W queue, see below */
    381 #define	WM_TXQ_NO_SPACE		0x1
    382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    383 
    384 	bool txq_stopping;
    385 
    386 	bool txq_sending;
    387 	time_t txq_lastsent;
    388 
    389 	/* Checksum flags used for previous packet */
    390 	uint32_t	txq_last_hw_cmd;
    391 	uint8_t		txq_last_hw_fields;
    392 	uint16_t	txq_last_hw_ipcs;
    393 	uint16_t	txq_last_hw_tucs;
    394 
    395 	uint32_t txq_packets;		/* for AIM */
    396 	uint32_t txq_bytes;		/* for AIM */
    397 #ifdef WM_EVENT_COUNTERS
    398 	/* TX event counters */
    399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
    400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
    401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
    402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
    403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
    404 					    /* XXX not used? */
    405 
    406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
    407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
    408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
    409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
    410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
    411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
    412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
    413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
    414 					    /* other than toomanyseg */
    415 
     416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
    417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
    418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
    419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
    420 
    421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    423 #endif /* WM_EVENT_COUNTERS */
    424 };
    425 
    426 struct wm_rxqueue {
    427 	kmutex_t *rxq_lock;		/* lock for rx operations */
    428 
    429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    430 
    431 	/* Software state for the receive descriptors. */
    432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    433 
    434 	/* RX control data structures. */
    435 	int rxq_ndesc;			/* must be a power of two */
     436 	size_t rxq_descsize;		/* size of an Rx descriptor */
    437 	rxdescs_t *rxq_descs_u;
    438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     440 	int rxq_desc_rseg;		/* real number of control segments */
    441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    445 
    446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    447 
    448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    449 	int rxq_discard;
    450 	int rxq_len;
    451 	struct mbuf *rxq_head;
    452 	struct mbuf *rxq_tail;
    453 	struct mbuf **rxq_tailp;
    454 
    455 	bool rxq_stopping;
    456 
    457 	uint32_t rxq_packets;		/* for AIM */
    458 	uint32_t rxq_bytes;		/* for AIM */
    459 #ifdef WM_EVENT_COUNTERS
    460 	/* RX event counters */
    461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    463 
    464 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    465 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    466 #endif
    467 };
    468 
    469 struct wm_queue {
    470 	int wmq_id;			/* index of TX/RX queues */
    471 	int wmq_intr_idx;		/* index of MSI-X tables */
    472 
    473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    474 	bool wmq_set_itr;
    475 
    476 	struct wm_txqueue wmq_txq;
    477 	struct wm_rxqueue wmq_rxq;
    478 	char sysctlname[32];		/* Name for sysctl */
    479 
    480 	bool wmq_txrx_use_workqueue;
    481 	struct work wmq_cookie;
    482 	void *wmq_si;
    483 };
    484 
    485 struct wm_phyop {
    486 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    487 	void (*release)(struct wm_softc *);
    488 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    489 	int (*writereg_locked)(device_t, int, int, uint16_t);
    490 	int reset_delay_us;
    491 	bool no_errprint;
    492 };
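
        /*
         * Illustrative sketch (not compiled): PHY accessors bracket register
         * access with the chip-specific semaphore ops, and acquire() is
         * marked warn_unused_result, so its return value must be checked:
         */
        #if 0
        	if ((rv = sc->phy.acquire(sc)) != 0)
        		return rv;
        	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
        	sc->phy.release(sc);
        	return rv;
        #endif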
    493 
    494 struct wm_nvmop {
    495 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
    496 	void (*release)(struct wm_softc *);
    497 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    498 };
    499 
    500 /*
    501  * Software state per device.
    502  */
    503 struct wm_softc {
    504 	device_t sc_dev;		/* generic device information */
    505 	bus_space_tag_t sc_st;		/* bus space tag */
    506 	bus_space_handle_t sc_sh;	/* bus space handle */
    507 	bus_size_t sc_ss;		/* bus space size */
    508 	bus_space_tag_t sc_iot;		/* I/O space tag */
    509 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    510 	bus_size_t sc_ios;		/* I/O space size */
    511 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    512 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    513 	bus_size_t sc_flashs;		/* flash registers space size */
    514 	off_t sc_flashreg_offset;	/*
    515 					 * offset to flash registers from
    516 					 * start of BAR
    517 					 */
    518 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    519 
    520 	struct ethercom sc_ethercom;	/* Ethernet common data */
    521 	struct mii_data sc_mii;		/* MII/media information */
    522 
    523 	pci_chipset_tag_t sc_pc;
    524 	pcitag_t sc_pcitag;
    525 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    526 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    527 
    528 	uint16_t sc_pcidevid;		/* PCI device ID */
    529 	wm_chip_type sc_type;		/* MAC type */
    530 	int sc_rev;			/* MAC revision */
    531 	wm_phy_type sc_phytype;		/* PHY type */
    532 	uint8_t sc_sfptype;		/* SFP type */
    533 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    534 #define	WM_MEDIATYPE_UNKNOWN		0x00
    535 #define	WM_MEDIATYPE_FIBER		0x01
    536 #define	WM_MEDIATYPE_COPPER		0x02
    537 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    538 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    539 	int sc_flags;			/* flags; see below */
    540 	u_short sc_if_flags;		/* last if_flags */
    541 	int sc_ec_capenable;		/* last ec_capenable */
    542 	int sc_flowflags;		/* 802.3x flow control flags */
    543 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    544 	int sc_align_tweak;
    545 
     546 	void *sc_ihs[WM_MAX_NINTR];	/*
     547 					 * interrupt cookies.
     548 					 * - legacy and msi use sc_ihs[0] only
     549 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
     550 					 */
     551 	pci_intr_handle_t *sc_intrs;	/*
     552 					 * legacy and msi use sc_intrs[0] only
     553 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
     554 					 */
    555 	int sc_nintrs;			/* number of interrupts */
    556 
    557 	int sc_link_intr_idx;		/* index of MSI-X tables */
    558 
    559 	callout_t sc_tick_ch;		/* tick callout */
    560 	bool sc_core_stopping;
    561 
    562 	int sc_nvm_ver_major;
    563 	int sc_nvm_ver_minor;
    564 	int sc_nvm_ver_build;
    565 	int sc_nvm_addrbits;		/* NVM address bits */
    566 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    567 	int sc_ich8_flash_base;
    568 	int sc_ich8_flash_bank_size;
    569 	int sc_nvm_k1_enabled;
    570 
    571 	int sc_nqueues;
    572 	struct wm_queue *sc_queue;
    573 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    574 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    575 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    576 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    577 	struct workqueue *sc_queue_wq;
    578 	bool sc_txrx_use_workqueue;
    579 
    580 	int sc_affinity_offset;
    581 
    582 #ifdef WM_EVENT_COUNTERS
    583 	/* Event counters. */
    584 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    585 
    586 	/* >= WM_T_82542_2_1 */
    587 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    588 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    589 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    590 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    591 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    592 
    593 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
    594 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
    595 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
    596 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
    597 	struct evcnt sc_ev_mpc;		/* Missed Packets */
    598 	struct evcnt sc_ev_colc;	/* Collision */
    599 	struct evcnt sc_ev_sec;		/* Sequence Error */
    600 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
    601 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
    602 	struct evcnt sc_ev_scc;		/* Single Collision */
    603 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
    604 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
    605 	struct evcnt sc_ev_latecol;	/* Late Collision */
    606 	struct evcnt sc_ev_dc;		/* Defer */
    607 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
    608 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
    609 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
    610 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
    611 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
    612 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
    613 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
    614 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
    615 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
    616 	struct evcnt sc_ev_roc;		/* Rx Oversize */
    617 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
    618 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
    619 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
    620 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
    621 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
    622 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
    623 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
    624 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
    625 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
    626 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
     627 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
    628 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
    629 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
    630 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
    631 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
    632 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
    633 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
    634 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
    635 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
    636 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
    637 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
    638 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
    639 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
    640 	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
    641 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
    642 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
    643 	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
    644 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
    645 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
    646 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
    647 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
    648 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
    649 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
    650 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
    651 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
    652 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
    653 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
    654 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
    655 
    656 #endif /* WM_EVENT_COUNTERS */
    657 
    658 	struct sysctllog *sc_sysctllog;
    659 
     660 	/* This variable is used only on the 82547. */
    661 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    662 
    663 	uint32_t sc_ctrl;		/* prototype CTRL register */
    664 #if 0
    665 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    666 #endif
    667 	uint32_t sc_icr;		/* prototype interrupt bits */
    668 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    669 	uint32_t sc_tctl;		/* prototype TCTL register */
    670 	uint32_t sc_rctl;		/* prototype RCTL register */
    671 	uint32_t sc_txcw;		/* prototype TXCW register */
    672 	uint32_t sc_tipg;		/* prototype TIPG register */
    673 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    674 	uint32_t sc_pba;		/* prototype PBA register */
    675 
    676 	int sc_tbi_linkup;		/* TBI link status */
    677 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    678 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    679 
    680 	int sc_mchash_type;		/* multicast filter offset */
    681 
    682 	krndsource_t rnd_source;	/* random source */
    683 
    684 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    685 
    686 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    687 	kmutex_t *sc_ich_phymtx;	/*
    688 					 * 82574/82583/ICH/PCH specific PHY
    689 					 * mutex. For 82574/82583, the mutex
    690 					 * is used for both PHY and NVM.
    691 					 */
    692 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    693 
    694 	struct wm_phyop phy;
    695 	struct wm_nvmop nvm;
    696 
    697 	struct workqueue *sc_reset_wq;
    698 	struct work sc_reset_work;
    699 	volatile unsigned sc_reset_pending;
    700 
    701 	bool sc_dying;
    702 
    703 #ifdef WM_DEBUG
    704 	uint32_t sc_debug;
    705 	bool sc_trigger_reset;
    706 #endif
    707 };
    708 
    709 #define	WM_RXCHAIN_RESET(rxq)						\
    710 do {									\
    711 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    712 	*(rxq)->rxq_tailp = NULL;					\
    713 	(rxq)->rxq_len = 0;						\
    714 } while (/*CONSTCOND*/0)
    715 
    716 #define	WM_RXCHAIN_LINK(rxq, m)						\
    717 do {									\
    718 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    719 	(rxq)->rxq_tailp = &(m)->m_next;				\
    720 } while (/*CONSTCOND*/0)
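
        /*
         * Illustrative sketch (not compiled): a packet spanning several 2k
         * buffers is assembled by linking each received mbuf onto the chain
         * and, on the last buffer, taking the head and resetting the chain
         * for the next packet ("is_last_buffer" is an illustrative flag):
         */
        #if 0
        	WM_RXCHAIN_LINK(rxq, m);
        	if (is_last_buffer) {
        		m = rxq->rxq_head;	/* completed packet */
        		WM_RXCHAIN_RESET(rxq);
        	}
        #endif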
    721 
    722 #ifdef WM_EVENT_COUNTERS
    723 #ifdef __HAVE_ATOMIC64_LOADSTORE
    724 #define	WM_EVCNT_INCR(ev)						\
    725 	atomic_store_relaxed(&((ev)->ev_count),				\
    726 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    727 #define	WM_EVCNT_ADD(ev, val)						\
    728 	atomic_store_relaxed(&((ev)->ev_count),				\
    729 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    730 #else
    731 #define	WM_EVCNT_INCR(ev)						\
    732 	((ev)->ev_count)++
    733 #define	WM_EVCNT_ADD(ev, val)						\
    734 	(ev)->ev_count += (val)
    735 #endif
    736 
    737 #define WM_Q_EVCNT_INCR(qname, evname)			\
    738 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    739 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    740 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    741 #else /* !WM_EVENT_COUNTERS */
    742 #define	WM_EVCNT_INCR(ev)	/* nothing */
    743 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    744 
    745 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    746 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    747 #endif /* !WM_EVENT_COUNTERS */
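
        /*
         * Note (assumption, for illustration): the relaxed atomic load/store
         * pair above is not a read-modify-write; it only keeps 64-bit counter
         * reads tear-free for observers such as vmstat(1).  Each counter is
         * therefore expected to have a single writer at a time, e.g. under
         * the owning queue's lock:
         */
        #if 0
        	mutex_enter(txq->txq_lock);
        	WM_Q_EVCNT_INCR(txq, defrag);
        	mutex_exit(txq->txq_lock);
        #endif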
    748 
    749 #define	CSR_READ(sc, reg)						\
    750 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    751 #define	CSR_WRITE(sc, reg, val)						\
    752 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    753 #define	CSR_WRITE_FLUSH(sc)						\
    754 	(void)CSR_READ((sc), WMREG_STATUS)
    755 
    756 #define ICH8_FLASH_READ32(sc, reg)					\
    757 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    758 	    (reg) + sc->sc_flashreg_offset)
    759 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    760 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    761 	    (reg) + sc->sc_flashreg_offset, (data))
    762 
    763 #define ICH8_FLASH_READ16(sc, reg)					\
    764 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    765 	    (reg) + sc->sc_flashreg_offset)
    766 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    767 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    768 	    (reg) + sc->sc_flashreg_offset, (data))
    769 
    770 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    771 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    772 
    773 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    774 #define	WM_CDTXADDR_HI(txq, x)						\
    775 	(sizeof(bus_addr_t) == 8 ?					\
    776 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    777 
    778 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    779 #define	WM_CDRXADDR_HI(rxq, x)						\
    780 	(sizeof(bus_addr_t) == 8 ?					\
    781 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
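
        /*
         * Illustrative sketch (not compiled): descriptor base-address
         * registers take the ring's physical address as two 32-bit halves;
         * with a 32-bit bus_addr_t the high half is constant zero (register
         * names here are illustrative, see if_wmreg.h):
         */
        #if 0
        	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
        	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
        #endif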
    782 
     783 /*
     784  * Register read/write functions,
     785  * other than CSR_{READ|WRITE}().
     786  */
    787 #if 0
    788 static inline uint32_t wm_io_read(struct wm_softc *, int);
    789 #endif
    790 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    791 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    792     uint32_t, uint32_t);
    793 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    794 
    795 /*
    796  * Descriptor sync/init functions.
    797  */
    798 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    799 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    800 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    801 
    802 /*
    803  * Device driver interface functions and commonly used functions.
    804  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    805  */
    806 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    807 static int	wm_match(device_t, cfdata_t, void *);
    808 static void	wm_attach(device_t, device_t, void *);
    809 static int	wm_detach(device_t, int);
    810 static bool	wm_suspend(device_t, const pmf_qual_t *);
    811 static bool	wm_resume(device_t, const pmf_qual_t *);
    812 static bool	wm_watchdog(struct ifnet *);
    813 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    814     uint16_t *);
    815 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    816     uint16_t *);
    817 static void	wm_tick(void *);
    818 static int	wm_ifflags_cb(struct ethercom *);
    819 static int	wm_ioctl(struct ifnet *, u_long, void *);
    820 /* MAC address related */
    821 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    822 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    823 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    824 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    825 static int	wm_rar_count(struct wm_softc *);
    826 static void	wm_set_filter(struct wm_softc *);
    827 /* Reset and init related */
    828 static void	wm_set_vlan(struct wm_softc *);
    829 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    830 static void	wm_get_auto_rd_done(struct wm_softc *);
    831 static void	wm_lan_init_done(struct wm_softc *);
    832 static void	wm_get_cfg_done(struct wm_softc *);
    833 static int	wm_phy_post_reset(struct wm_softc *);
    834 static int	wm_write_smbus_addr(struct wm_softc *);
    835 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    836 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    837 static void	wm_initialize_hardware_bits(struct wm_softc *);
    838 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    839 static int	wm_reset_phy(struct wm_softc *);
    840 static void	wm_flush_desc_rings(struct wm_softc *);
    841 static void	wm_reset(struct wm_softc *);
    842 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    843 static void	wm_rxdrain(struct wm_rxqueue *);
    844 static void	wm_init_rss(struct wm_softc *);
    845 static void	wm_adjust_qnum(struct wm_softc *, int);
    846 static inline bool	wm_is_using_msix(struct wm_softc *);
    847 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    848 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    849 static int	wm_setup_legacy(struct wm_softc *);
    850 static int	wm_setup_msix(struct wm_softc *);
    851 static int	wm_init(struct ifnet *);
    852 static int	wm_init_locked(struct ifnet *);
    853 static void	wm_init_sysctls(struct wm_softc *);
    854 static void	wm_unset_stopping_flags(struct wm_softc *);
    855 static void	wm_set_stopping_flags(struct wm_softc *);
    856 static void	wm_stop(struct ifnet *, int);
    857 static void	wm_stop_locked(struct ifnet *, bool, bool);
    858 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    859 static void	wm_82547_txfifo_stall(void *);
    860 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    861 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    862 /* DMA related */
    863 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    864 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    865 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    866 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    867     struct wm_txqueue *);
    868 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    869 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    870 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    871     struct wm_rxqueue *);
    872 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    873 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    874 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    875 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    876 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    877 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    878 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    879     struct wm_txqueue *);
    880 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    881     struct wm_rxqueue *);
    882 static int	wm_alloc_txrx_queues(struct wm_softc *);
    883 static void	wm_free_txrx_queues(struct wm_softc *);
    884 static int	wm_init_txrx_queues(struct wm_softc *);
    885 /* Start */
    886 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    887     struct wm_txsoft *, uint32_t *, uint8_t *);
    888 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    889 static void	wm_start(struct ifnet *);
    890 static void	wm_start_locked(struct ifnet *);
    891 static int	wm_transmit(struct ifnet *, struct mbuf *);
    892 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    893 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    894 		    bool);
    895 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    896     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    897 static void	wm_nq_start(struct ifnet *);
    898 static void	wm_nq_start_locked(struct ifnet *);
    899 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    900 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    901 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    902 		    bool);
    903 static void	wm_deferred_start_locked(struct wm_txqueue *);
    904 static void	wm_handle_queue(void *);
    905 static void	wm_handle_queue_work(struct work *, void *);
    906 static void	wm_handle_reset_work(struct work *, void *);
    907 /* Interrupt */
    908 static bool	wm_txeof(struct wm_txqueue *, u_int);
    909 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    910 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    911 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    912 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    913 static void	wm_linkintr(struct wm_softc *, uint32_t);
    914 static int	wm_intr_legacy(void *);
    915 static inline void	wm_txrxintr_disable(struct wm_queue *);
    916 static inline void	wm_txrxintr_enable(struct wm_queue *);
    917 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    918 static int	wm_txrxintr_msix(void *);
    919 static int	wm_linkintr_msix(void *);
    920 
    921 /*
    922  * Media related.
    923  * GMII, SGMII, TBI, SERDES and SFP.
    924  */
    925 /* Common */
    926 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    927 /* GMII related */
    928 static void	wm_gmii_reset(struct wm_softc *);
    929 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    930 static int	wm_get_phy_id_82575(struct wm_softc *);
    931 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    932 static int	wm_gmii_mediachange(struct ifnet *);
    933 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    934 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    935 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    936 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    937 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    938 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    939 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    940 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    941 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    942 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    943 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    944 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    945 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    946 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    947 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    948 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    949 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    950 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    951 	bool);
    952 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    953 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    954 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    955 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    956 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    957 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    958 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    959 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    960 static void	wm_gmii_statchg(struct ifnet *);
     961 /*
     962  * Kumeran related (80003, ICH* and PCH*).
     963  * These functions are not for accessing MII registers but for
     964  * accessing Kumeran-specific registers.
     965  */
    966 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    967 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    968 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    969 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    970 /* EMI register related */
    971 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    972 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    973 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    974 /* SGMII */
    975 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    976 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    977 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    978 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    979 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    980 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    981 /* TBI related */
    982 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    983 static void	wm_tbi_mediainit(struct wm_softc *);
    984 static int	wm_tbi_mediachange(struct ifnet *);
    985 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    986 static int	wm_check_for_link(struct wm_softc *);
    987 static void	wm_tbi_tick(struct wm_softc *);
    988 /* SERDES related */
    989 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    990 static int	wm_serdes_mediachange(struct ifnet *);
    991 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    992 static void	wm_serdes_tick(struct wm_softc *);
    993 /* SFP related */
    994 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    995 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    996 
    997 /*
    998  * NVM related.
    999  * Microwire, SPI (w/wo EERD) and Flash.
   1000  */
   1001 /* Misc functions */
   1002 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
   1003 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
   1004 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
   1005 /* Microwire */
   1006 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
   1007 /* SPI */
   1008 static int	wm_nvm_ready_spi(struct wm_softc *);
   1009 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
   1010 /* Using with EERD */
   1011 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
   1012 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
   1013 /* Flash */
   1014 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
   1015     unsigned int *);
   1016 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
   1017 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
   1018 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
   1019     uint32_t *);
   1020 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
   1021 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
   1022 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
   1023 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
   1024 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
   1025 /* iNVM */
   1026 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
   1027 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
   1028 /* Lock, detecting NVM type, validate checksum and read */
   1029 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
   1030 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
   1031 static int	wm_nvm_validate_checksum(struct wm_softc *);
   1032 static void	wm_nvm_version_invm(struct wm_softc *);
   1033 static void	wm_nvm_version(struct wm_softc *);
   1034 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
   1035 
    1036 /*
    1037  * Hardware semaphores.
    1038  * Very complex...
    1039  */
   1040 static int	wm_get_null(struct wm_softc *);
   1041 static void	wm_put_null(struct wm_softc *);
   1042 static int	wm_get_eecd(struct wm_softc *);
   1043 static void	wm_put_eecd(struct wm_softc *);
   1044 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
   1045 static void	wm_put_swsm_semaphore(struct wm_softc *);
   1046 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
   1047 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
   1048 static int	wm_get_nvm_80003(struct wm_softc *);
   1049 static void	wm_put_nvm_80003(struct wm_softc *);
   1050 static int	wm_get_nvm_82571(struct wm_softc *);
   1051 static void	wm_put_nvm_82571(struct wm_softc *);
   1052 static int	wm_get_phy_82575(struct wm_softc *);
   1053 static void	wm_put_phy_82575(struct wm_softc *);
   1054 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1055 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1056 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1057 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1058 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1059 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1060 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1061 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1062 
   1063 /*
   1064  * Management mode and power management related subroutines.
   1065  * BMC, AMT, suspend/resume and EEE.
   1066  */
   1067 #if 0
   1068 static int	wm_check_mng_mode(struct wm_softc *);
   1069 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1070 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1071 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1072 #endif
   1073 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1074 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1075 static void	wm_get_hw_control(struct wm_softc *);
   1076 static void	wm_release_hw_control(struct wm_softc *);
   1077 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1078 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1079 static void	wm_init_manageability(struct wm_softc *);
   1080 static void	wm_release_manageability(struct wm_softc *);
   1081 static void	wm_get_wakeup(struct wm_softc *);
   1082 static int	wm_ulp_disable(struct wm_softc *);
   1083 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1084 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1085 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1086 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1087 static void	wm_enable_wakeup(struct wm_softc *);
   1088 static void	wm_disable_aspm(struct wm_softc *);
   1089 /* LPLU (Low Power Link Up) */
   1090 static void	wm_lplu_d0_disable(struct wm_softc *);
   1091 /* EEE */
   1092 static int	wm_set_eee_i350(struct wm_softc *);
   1093 static int	wm_set_eee_pchlan(struct wm_softc *);
   1094 static int	wm_set_eee(struct wm_softc *);
   1095 
   1096 /*
   1097  * Workarounds (mainly PHY related).
   1098  * Basically, PHY's workarounds are in the PHY drivers.
    1099  * Basically, PHY workarounds are in the PHY drivers.
   1100 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1101 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1102 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1103 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1104 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1105 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1106 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1107 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1108 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1109 static int	wm_k1_workaround_lv(struct wm_softc *);
   1110 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1111 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1112 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
   1113 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1114 static void	wm_reset_init_script_82575(struct wm_softc *);
   1115 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1116 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1117 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1118 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1119 static int	wm_pll_workaround_i210(struct wm_softc *);
   1120 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1121 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1122 static void	wm_set_linkdown_discard(struct wm_softc *);
   1123 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1124 
   1125 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1126 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1127 #ifdef WM_DEBUG
   1128 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1129 #endif
   1130 
   1131 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1132     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1133 
   1134 /*
   1135  * Devices supported by this driver.
   1136  */
   1137 static const struct wm_product {
   1138 	pci_vendor_id_t		wmp_vendor;
   1139 	pci_product_id_t	wmp_product;
   1140 	const char		*wmp_name;
   1141 	wm_chip_type		wmp_type;
   1142 	uint32_t		wmp_flags;
   1143 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1144 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1145 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1146 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1147 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1148 } wm_products[] = {
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1150 	  "Intel i82542 1000BASE-X Ethernet",
   1151 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1154 	  "Intel i82543GC 1000BASE-X Ethernet",
   1155 	  WM_T_82543,		WMP_F_FIBER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1158 	  "Intel i82543GC 1000BASE-T Ethernet",
   1159 	  WM_T_82543,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1162 	  "Intel i82544EI 1000BASE-T Ethernet",
   1163 	  WM_T_82544,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1166 	  "Intel i82544EI 1000BASE-X Ethernet",
   1167 	  WM_T_82544,		WMP_F_FIBER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1170 	  "Intel i82544GC 1000BASE-T Ethernet",
   1171 	  WM_T_82544,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1174 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1175 	  WM_T_82544,		WMP_F_COPPER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1178 	  "Intel i82540EM 1000BASE-T Ethernet",
   1179 	  WM_T_82540,		WMP_F_COPPER },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1182 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1183 	  WM_T_82540,		WMP_F_COPPER },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1186 	  "Intel i82540EP 1000BASE-T Ethernet",
   1187 	  WM_T_82540,		WMP_F_COPPER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1190 	  "Intel i82540EP 1000BASE-T Ethernet",
   1191 	  WM_T_82540,		WMP_F_COPPER },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1194 	  "Intel i82540EP 1000BASE-T Ethernet",
   1195 	  WM_T_82540,		WMP_F_COPPER },
   1196 
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1198 	  "Intel i82545EM 1000BASE-T Ethernet",
   1199 	  WM_T_82545,		WMP_F_COPPER },
   1200 
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1202 	  "Intel i82545GM 1000BASE-T Ethernet",
   1203 	  WM_T_82545_3,		WMP_F_COPPER },
   1204 
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1206 	  "Intel i82545GM 1000BASE-X Ethernet",
   1207 	  WM_T_82545_3,		WMP_F_FIBER },
   1208 
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1210 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1211 	  WM_T_82545_3,		WMP_F_SERDES },
   1212 
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1214 	  "Intel i82546EB 1000BASE-T Ethernet",
   1215 	  WM_T_82546,		WMP_F_COPPER },
   1216 
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1218 	  "Intel i82546EB 1000BASE-T Ethernet",
   1219 	  WM_T_82546,		WMP_F_COPPER },
   1220 
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1222 	  "Intel i82545EM 1000BASE-X Ethernet",
   1223 	  WM_T_82545,		WMP_F_FIBER },
   1224 
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1226 	  "Intel i82546EB 1000BASE-X Ethernet",
   1227 	  WM_T_82546,		WMP_F_FIBER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1230 	  "Intel i82546GB 1000BASE-T Ethernet",
   1231 	  WM_T_82546_3,		WMP_F_COPPER },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1234 	  "Intel i82546GB 1000BASE-X Ethernet",
   1235 	  WM_T_82546_3,		WMP_F_FIBER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1238 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1239 	  WM_T_82546_3,		WMP_F_SERDES },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1242 	  "i82546GB quad-port Gigabit Ethernet",
   1243 	  WM_T_82546_3,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1246 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1247 	  WM_T_82546_3,		WMP_F_COPPER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1250 	  "Intel PRO/1000MT (82546GB)",
   1251 	  WM_T_82546_3,		WMP_F_COPPER },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1254 	  "Intel i82541EI 1000BASE-T Ethernet",
   1255 	  WM_T_82541,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1258 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1259 	  WM_T_82541,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1262 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1263 	  WM_T_82541,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1266 	  "Intel i82541ER 1000BASE-T Ethernet",
   1267 	  WM_T_82541_2,		WMP_F_COPPER },
   1268 
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1270 	  "Intel i82541GI 1000BASE-T Ethernet",
   1271 	  WM_T_82541_2,		WMP_F_COPPER },
   1272 
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1274 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1275 	  WM_T_82541_2,		WMP_F_COPPER },
   1276 
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1278 	  "Intel i82541PI 1000BASE-T Ethernet",
   1279 	  WM_T_82541_2,		WMP_F_COPPER },
   1280 
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1282 	  "Intel i82547EI 1000BASE-T Ethernet",
   1283 	  WM_T_82547,		WMP_F_COPPER },
   1284 
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1286 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1287 	  WM_T_82547,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1290 	  "Intel i82547GI 1000BASE-T Ethernet",
   1291 	  WM_T_82547_2,		WMP_F_COPPER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1294 	  "Intel PRO/1000 PT (82571EB)",
   1295 	  WM_T_82571,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1298 	  "Intel PRO/1000 PF (82571EB)",
   1299 	  WM_T_82571,		WMP_F_FIBER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1302 	  "Intel PRO/1000 PB (82571EB)",
   1303 	  WM_T_82571,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1306 	  "Intel PRO/1000 QT (82571EB)",
   1307 	  WM_T_82571,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1310 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1311 	  WM_T_82571,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1314 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1315 	  WM_T_82571,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1318 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1319 	  WM_T_82571,		WMP_F_SERDES },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1322 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1323 	  WM_T_82571,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1326 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1327 	  WM_T_82571,		WMP_F_FIBER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1330 	  "Intel i82572EI 1000baseT Ethernet",
   1331 	  WM_T_82572,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1334 	  "Intel i82572EI 1000baseX Ethernet",
   1335 	  WM_T_82572,		WMP_F_FIBER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1338 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1339 	  WM_T_82572,		WMP_F_SERDES },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1342 	  "Intel i82572EI 1000baseT Ethernet",
   1343 	  WM_T_82572,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1346 	  "Intel i82573E",
   1347 	  WM_T_82573,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1350 	  "Intel i82573E IAMT",
   1351 	  WM_T_82573,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1354 	  "Intel i82573L Gigabit Ethernet",
   1355 	  WM_T_82573,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1358 	  "Intel i82574L",
   1359 	  WM_T_82574,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1362 	  "Intel i82574L",
   1363 	  WM_T_82574,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1366 	  "Intel i82583V",
   1367 	  WM_T_82583,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1370 	  "i80003 dual 1000baseT Ethernet",
   1371 	  WM_T_80003,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1374 	  "i80003 dual 1000baseX Ethernet",
   1375 	  WM_T_80003,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1378 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1379 	  WM_T_80003,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1382 	  "Intel i80003 1000baseT Ethernet",
   1383 	  WM_T_80003,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1386 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1387 	  WM_T_80003,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1390 	  "Intel i82801H (M_AMT) LAN Controller",
   1391 	  WM_T_ICH8,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1393 	  "Intel i82801H (AMT) LAN Controller",
   1394 	  WM_T_ICH8,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1396 	  "Intel i82801H LAN Controller",
   1397 	  WM_T_ICH8,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1399 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1400 	  WM_T_ICH8,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1402 	  "Intel i82801H (M) LAN Controller",
   1403 	  WM_T_ICH8,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1405 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1406 	  WM_T_ICH8,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1408 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1409 	  WM_T_ICH8,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1411 	  "82567V-3 LAN Controller",
   1412 	  WM_T_ICH8,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1414 	  "82801I (AMT) LAN Controller",
   1415 	  WM_T_ICH9,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1417 	  "82801I 10/100 LAN Controller",
   1418 	  WM_T_ICH9,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1420 	  "82801I (G) 10/100 LAN Controller",
   1421 	  WM_T_ICH9,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1423 	  "82801I (GT) 10/100 LAN Controller",
   1424 	  WM_T_ICH9,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1426 	  "82801I (C) LAN Controller",
   1427 	  WM_T_ICH9,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1429 	  "82801I mobile LAN Controller",
   1430 	  WM_T_ICH9,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1432 	  "82801I mobile (V) LAN Controller",
   1433 	  WM_T_ICH9,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1435 	  "82801I mobile (AMT) LAN Controller",
   1436 	  WM_T_ICH9,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1438 	  "82567LM-4 LAN Controller",
   1439 	  WM_T_ICH9,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1441 	  "82567LM-2 LAN Controller",
   1442 	  WM_T_ICH10,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1444 	  "82567LF-2 LAN Controller",
   1445 	  WM_T_ICH10,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1447 	  "82567LM-3 LAN Controller",
   1448 	  WM_T_ICH10,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1450 	  "82567LF-3 LAN Controller",
   1451 	  WM_T_ICH10,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1453 	  "82567V-2 LAN Controller",
   1454 	  WM_T_ICH10,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1456 	  "82567V-3? LAN Controller",
   1457 	  WM_T_ICH10,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1459 	  "HANKSVILLE LAN Controller",
   1460 	  WM_T_ICH10,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1462 	  "PCH LAN (82577LM) Controller",
   1463 	  WM_T_PCH,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1465 	  "PCH LAN (82577LC) Controller",
   1466 	  WM_T_PCH,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1468 	  "PCH LAN (82578DM) Controller",
   1469 	  WM_T_PCH,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1471 	  "PCH LAN (82578DC) Controller",
   1472 	  WM_T_PCH,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1474 	  "PCH2 LAN (82579LM) Controller",
   1475 	  WM_T_PCH2,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1477 	  "PCH2 LAN (82579V) Controller",
   1478 	  WM_T_PCH2,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1480 	  "82575EB dual-1000baseT Ethernet",
   1481 	  WM_T_82575,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1483 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1484 	  WM_T_82575,		WMP_F_SERDES },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1486 	  "82575GB quad-1000baseT Ethernet",
   1487 	  WM_T_82575,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1489 	  "82575GB quad-1000baseT Ethernet (PM)",
   1490 	  WM_T_82575,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1492 	  "82576 1000BaseT Ethernet",
   1493 	  WM_T_82576,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1495 	  "82576 1000BaseX Ethernet",
   1496 	  WM_T_82576,		WMP_F_FIBER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1499 	  "82576 gigabit Ethernet (SERDES)",
   1500 	  WM_T_82576,		WMP_F_SERDES },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1503 	  "82576 quad-1000BaseT Ethernet",
   1504 	  WM_T_82576,		WMP_F_COPPER },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1507 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1508 	  WM_T_82576,		WMP_F_COPPER },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1511 	  "82576 gigabit Ethernet",
   1512 	  WM_T_82576,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1515 	  "82576 gigabit Ethernet (SERDES)",
   1516 	  WM_T_82576,		WMP_F_SERDES },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1518 	  "82576 quad-gigabit Ethernet (SERDES)",
   1519 	  WM_T_82576,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1522 	  "82580 1000BaseT Ethernet",
   1523 	  WM_T_82580,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1525 	  "82580 1000BaseX Ethernet",
   1526 	  WM_T_82580,		WMP_F_FIBER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1529 	  "82580 1000BaseT Ethernet (SERDES)",
   1530 	  WM_T_82580,		WMP_F_SERDES },
   1531 
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1533 	  "82580 gigabit Ethernet (SGMII)",
   1534 	  WM_T_82580,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1536 	  "82580 dual-1000BaseT Ethernet",
   1537 	  WM_T_82580,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1540 	  "82580 quad-1000BaseX Ethernet",
   1541 	  WM_T_82580,		WMP_F_FIBER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1544 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1545 	  WM_T_82580,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1548 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1549 	  WM_T_82580,		WMP_F_SERDES },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1552 	  "DH89XXCC 1000BASE-KX Ethernet",
   1553 	  WM_T_82580,		WMP_F_SERDES },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1556 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1557 	  WM_T_82580,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1560 	  "I350 Gigabit Network Connection",
   1561 	  WM_T_I350,		WMP_F_COPPER },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1564 	  "I350 Gigabit Fiber Network Connection",
   1565 	  WM_T_I350,		WMP_F_FIBER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1568 	  "I350 Gigabit Backplane Connection",
   1569 	  WM_T_I350,		WMP_F_SERDES },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1572 	  "I350 Quad Port Gigabit Ethernet",
   1573 	  WM_T_I350,		WMP_F_SERDES },
   1574 
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1576 	  "I350 Gigabit Connection",
   1577 	  WM_T_I350,		WMP_F_COPPER },
   1578 
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1580 	  "I354 Gigabit Ethernet (KX)",
   1581 	  WM_T_I354,		WMP_F_SERDES },
   1582 
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1584 	  "I354 Gigabit Ethernet (SGMII)",
   1585 	  WM_T_I354,		WMP_F_COPPER },
   1586 
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1588 	  "I354 Gigabit Ethernet (2.5G)",
   1589 	  WM_T_I354,		WMP_F_COPPER },
   1590 
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1592 	  "I210-T1 Ethernet Server Adapter",
   1593 	  WM_T_I210,		WMP_F_COPPER },
   1594 
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1596 	  "I210 Ethernet (Copper OEM)",
   1597 	  WM_T_I210,		WMP_F_COPPER },
   1598 
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1600 	  "I210 Ethernet (Copper IT)",
   1601 	  WM_T_I210,		WMP_F_COPPER },
   1602 
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1604 	  "I210 Ethernet (Copper, FLASH less)",
   1605 	  WM_T_I210,		WMP_F_COPPER },
   1606 
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1608 	  "I210 Gigabit Ethernet (Fiber)",
   1609 	  WM_T_I210,		WMP_F_FIBER },
   1610 
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1612 	  "I210 Gigabit Ethernet (SERDES)",
   1613 	  WM_T_I210,		WMP_F_SERDES },
   1614 
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1616 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1617 	  WM_T_I210,		WMP_F_SERDES },
   1618 
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1620 	  "I210 Gigabit Ethernet (SGMII)",
   1621 	  WM_T_I210,		WMP_F_COPPER },
   1622 
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1624 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1625 	  WM_T_I210,		WMP_F_COPPER },
   1626 
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1628 	  "I211 Ethernet (COPPER)",
   1629 	  WM_T_I211,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1631 	  "I217 V Ethernet Connection",
   1632 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1634 	  "I217 LM Ethernet Connection",
   1635 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1637 	  "I218 V Ethernet Connection",
   1638 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1640 	  "I218 V Ethernet Connection",
   1641 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1643 	  "I218 V Ethernet Connection",
   1644 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1646 	  "I218 LM Ethernet Connection",
   1647 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1649 	  "I218 LM Ethernet Connection",
   1650 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1652 	  "I218 LM Ethernet Connection",
   1653 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1655 	  "I219 LM Ethernet Connection",
   1656 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1658 	  "I219 LM (2) Ethernet Connection",
   1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1661 	  "I219 LM (3) Ethernet Connection",
   1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1664 	  "I219 LM (4) Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1667 	  "I219 LM (5) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1670 	  "I219 LM (6) Ethernet Connection",
   1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1673 	  "I219 LM (7) Ethernet Connection",
   1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1676 	  "I219 LM (8) Ethernet Connection",
   1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1679 	  "I219 LM (9) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1682 	  "I219 LM (10) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1685 	  "I219 LM (11) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1688 	  "I219 LM (12) Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1691 	  "I219 LM (13) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1694 	  "I219 LM (14) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1697 	  "I219 LM (15) Ethernet Connection",
   1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1700 	  "I219 LM (16) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1703 	  "I219 LM (17) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1706 	  "I219 LM (18) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1709 	  "I219 LM (19) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1712 	  "I219 V Ethernet Connection",
   1713 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1715 	  "I219 V (2) Ethernet Connection",
   1716 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1718 	  "I219 V (4) Ethernet Connection",
   1719 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1721 	  "I219 V (5) Ethernet Connection",
   1722 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1723 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1724 	  "I219 V (6) Ethernet Connection",
   1725 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1727 	  "I219 V (7) Ethernet Connection",
   1728 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1730 	  "I219 V (8) Ethernet Connection",
   1731 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1733 	  "I219 V (9) Ethernet Connection",
   1734 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1735 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1736 	  "I219 V (10) Ethernet Connection",
   1737 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1739 	  "I219 V (11) Ethernet Connection",
   1740 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1742 	  "I219 V (12) Ethernet Connection",
   1743 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1745 	  "I219 V (13) Ethernet Connection",
   1746 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1747 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1748 	  "I219 V (14) Ethernet Connection",
   1749 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1751 	  "I219 V (15) Ethernet Connection",
   1752 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1754 	  "I219 V (16) Ethernet Connection",
   1755 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1757 	  "I219 V (17) Ethernet Connection",
   1758 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1759 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1760 	  "I219 V (18) Ethernet Connection",
   1761 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1763 	  "I219 V (19) Ethernet Connection",
   1764 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1765 	{ 0,			0,
   1766 	  NULL,
   1767 	  0,			0 },
   1768 };
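
/*
 * The table above ends with the all-zero sentinel entry; wm_lookup()
 * below scans it until wmp_name == NULL.  The media type is recovered
 * from the low bits of wmp_flags, as wm_attach() does later:
 *
 *	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
 */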
   1769 
   1770 /*
   1771  * Register read/write functions.
    1772  * These are the access paths other than CSR_{READ|WRITE}().
   1773  */
   1774 
   1775 #if 0 /* Not currently used */
   1776 static inline uint32_t
   1777 wm_io_read(struct wm_softc *sc, int reg)
   1778 {
   1779 
   1780 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1781 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1782 }
   1783 #endif
   1784 
   1785 static inline void
   1786 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1787 {
   1788 
   1789 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1791 }
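
/*
 * Sketch of the indirect access pattern used above (illustrative only,
 * not part of the driver): the register offset is written at BAR offset
 * 0 (IOADDR) and the data is then transferred at BAR offset 4 (IODATA).
 * A hypothetical read-modify-write helper built on the same pattern:
 */
#if 0 /* Example only */
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{
	uint32_t val;

	/* Select the register, read it, then write it back with the bits. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val | bits);
}
#endif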
   1792 
   1793 static inline void
   1794 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1795     uint32_t data)
   1796 {
   1797 	uint32_t regval;
   1798 	int i;
   1799 
   1800 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1801 
   1802 	CSR_WRITE(sc, reg, regval);
   1803 
   1804 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1805 		delay(5);
   1806 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1807 			break;
   1808 	}
   1809 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1810 		aprint_error("%s: WARNING:"
   1811 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1812 		    device_xname(sc->sc_dev), reg);
   1813 	}
   1814 }
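
/*
 * Note on the poll loop above: the worst-case wait before the warning
 * fires is SCTL_CTL_POLL_TIMEOUT * 5 microseconds.  A hypothetical call
 * site writing byte 'data' at byte offset 'off' of an 8-bit SERDES
 * control register would look like:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 */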
   1815 
   1816 static inline void
   1817 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1818 {
   1819 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1820 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1821 }
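
/*
 * A minimal sketch of the split above, assuming a 64-bit bus_addr_t:
 * BUS_ADDR_LO32()/BUS_ADDR_HI32() behave like
 *
 *	wa->wa_low  = htole32((uint32_t)(v & 0xffffffffU));
 *	wa->wa_high = htole32((uint32_t)(v >> 32));
 *
 * so the descriptor always carries the address as two little-endian
 * 32-bit words, regardless of the host's word size and byte order.
 */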
   1822 
   1823 /*
   1824  * Descriptor sync/init functions.
   1825  */
   1826 static inline void
   1827 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1828 {
   1829 	struct wm_softc *sc = txq->txq_sc;
   1830 
   1831 	/* If it will wrap around, sync to the end of the ring. */
   1832 	if ((start + num) > WM_NTXDESC(txq)) {
   1833 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1834 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1835 		    (WM_NTXDESC(txq) - start), ops);
   1836 		num -= (WM_NTXDESC(txq) - start);
   1837 		start = 0;
   1838 	}
   1839 
   1840 	/* Now sync whatever is left. */
   1841 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1842 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1843 }
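
/*
 * Worked example of the wrap-around handling above: with
 * WM_NTXDESC(txq) == 256, a call with start == 250 and num == 10 first
 * syncs descriptors 250..255, then wraps and syncs descriptors 0..3.
 */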
   1844 
   1845 static inline void
   1846 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1847 {
   1848 	struct wm_softc *sc = rxq->rxq_sc;
   1849 
   1850 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1851 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1852 }
   1853 
   1854 static inline void
   1855 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1856 {
   1857 	struct wm_softc *sc = rxq->rxq_sc;
   1858 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1859 	struct mbuf *m = rxs->rxs_mbuf;
   1860 
   1861 	/*
   1862 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1863 	 * so that the payload after the Ethernet header is aligned
   1864 	 * to a 4-byte boundary.
    1865 	 *
   1866 	 * XXX BRAINDAMAGE ALERT!
   1867 	 * The stupid chip uses the same size for every buffer, which
   1868 	 * is set in the Receive Control register.  We are using the 2K
   1869 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1870 	 * reason, we can't "scoot" packets longer than the standard
   1871 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1872 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1873 	 * the upper layer copy the headers.
   1874 	 */
   1875 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1876 
   1877 	if (sc->sc_type == WM_T_82574) {
   1878 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1879 		rxd->erx_data.erxd_addr =
   1880 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1881 		rxd->erx_data.erxd_dd = 0;
   1882 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1883 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1884 
   1885 		rxd->nqrx_data.nrxd_paddr =
   1886 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1887 		/* Currently, split header is not supported. */
   1888 		rxd->nqrx_data.nrxd_haddr = 0;
   1889 	} else {
   1890 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1891 
   1892 		wm_set_dma_addr(&rxd->wrx_addr,
   1893 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1894 		rxd->wrx_len = 0;
   1895 		rxd->wrx_cksum = 0;
   1896 		rxd->wrx_status = 0;
   1897 		rxd->wrx_errors = 0;
   1898 		rxd->wrx_special = 0;
   1899 	}
   1900 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1901 
   1902 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1903 }
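
/*
 * Worked example of the align_tweak logic above: with sc_align_tweak
 * == 2, the 14-byte Ethernet header starts at buffer offset 2, so the
 * IP header lands at offset 16, a 4-byte boundary.  A 1518-byte frame
 * then needs 1520 bytes, which still fits in the 2K buffer; only
 * frames that would exceed (2K - 2) force align_tweak back to 0 on
 * strict-alignment platforms.
 */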
   1904 
   1905 /*
   1906  * Device driver interface functions and commonly used functions.
   1907  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1908  */
   1909 
   1910 /* Lookup supported device table */
   1911 static const struct wm_product *
   1912 wm_lookup(const struct pci_attach_args *pa)
   1913 {
   1914 	const struct wm_product *wmp;
   1915 
   1916 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1917 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1918 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1919 			return wmp;
   1920 	}
   1921 	return NULL;
   1922 }
   1923 
   1924 /* The match function (ca_match) */
   1925 static int
   1926 wm_match(device_t parent, cfdata_t cf, void *aux)
   1927 {
   1928 	struct pci_attach_args *pa = aux;
   1929 
   1930 	if (wm_lookup(pa) != NULL)
   1931 		return 1;
   1932 
   1933 	return 0;
   1934 }
   1935 
   1936 /* The attach function (ca_attach) */
   1937 static void
   1938 wm_attach(device_t parent, device_t self, void *aux)
   1939 {
   1940 	struct wm_softc *sc = device_private(self);
   1941 	struct pci_attach_args *pa = aux;
   1942 	prop_dictionary_t dict;
   1943 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1944 	pci_chipset_tag_t pc = pa->pa_pc;
   1945 	int counts[PCI_INTR_TYPE_SIZE];
   1946 	pci_intr_type_t max_type;
   1947 	const char *eetype, *xname;
   1948 	bus_space_tag_t memt;
   1949 	bus_space_handle_t memh;
   1950 	bus_size_t memsize;
   1951 	int memh_valid;
   1952 	int i, error;
   1953 	const struct wm_product *wmp;
   1954 	prop_data_t ea;
   1955 	prop_number_t pn;
   1956 	uint8_t enaddr[ETHER_ADDR_LEN];
   1957 	char buf[256];
   1958 	char wqname[MAXCOMLEN];
   1959 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1960 	pcireg_t preg, memtype;
   1961 	uint16_t eeprom_data, apme_mask;
   1962 	bool force_clear_smbi;
   1963 	uint32_t link_mode;
   1964 	uint32_t reg;
   1965 
   1966 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1967 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1968 #endif
   1969 	sc->sc_dev = self;
   1970 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   1971 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1972 	sc->sc_core_stopping = false;
   1973 
   1974 	wmp = wm_lookup(pa);
   1975 #ifdef DIAGNOSTIC
   1976 	if (wmp == NULL) {
   1977 		printf("\n");
   1978 		panic("wm_attach: impossible");
   1979 	}
   1980 #endif
   1981 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1982 
   1983 	sc->sc_pc = pa->pa_pc;
   1984 	sc->sc_pcitag = pa->pa_tag;
   1985 
   1986 	if (pci_dma64_available(pa)) {
   1987 		aprint_verbose(", 64-bit DMA");
   1988 		sc->sc_dmat = pa->pa_dmat64;
   1989 	} else {
   1990 		aprint_verbose(", 32-bit DMA");
   1991 		sc->sc_dmat = pa->pa_dmat;
   1992 	}
   1993 
   1994 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1995 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1996 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1997 
   1998 	sc->sc_type = wmp->wmp_type;
   1999 
   2000 	/* Set default function pointers */
   2001 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2002 	sc->phy.release = sc->nvm.release = wm_put_null;
   2003 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2004 
   2005 	if (sc->sc_type < WM_T_82543) {
   2006 		if (sc->sc_rev < 2) {
   2007 			aprint_error_dev(sc->sc_dev,
   2008 			    "i82542 must be at least rev. 2\n");
   2009 			return;
   2010 		}
   2011 		if (sc->sc_rev < 3)
   2012 			sc->sc_type = WM_T_82542_2_0;
   2013 	}
   2014 
   2015 	/*
   2016 	 * Disable MSI for Errata:
   2017 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2018 	 *
   2019 	 *  82544: Errata 25
   2020 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2021 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2022 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2023 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2024 	 *
   2025 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2026 	 *
   2027 	 *  82571 & 82572: Errata 63
   2028 	 */
   2029 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2030 	    || (sc->sc_type == WM_T_82572))
   2031 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2032 
   2033 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2034 	    || (sc->sc_type == WM_T_82580)
   2035 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2036 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2037 		sc->sc_flags |= WM_F_NEWQUEUE;
   2038 
   2039 	/* Set device properties (mactype) */
   2040 	dict = device_properties(sc->sc_dev);
   2041 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2042 
   2043 	/*
    2044 	 * Map the device.  All devices support memory-mapped access,
   2045 	 * and it is really required for normal operation.
   2046 	 */
   2047 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2048 	switch (memtype) {
   2049 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2051 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2052 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2053 		break;
   2054 	default:
   2055 		memh_valid = 0;
   2056 		break;
   2057 	}
   2058 
   2059 	if (memh_valid) {
   2060 		sc->sc_st = memt;
   2061 		sc->sc_sh = memh;
   2062 		sc->sc_ss = memsize;
   2063 	} else {
   2064 		aprint_error_dev(sc->sc_dev,
   2065 		    "unable to map device registers\n");
   2066 		return;
   2067 	}
   2068 
   2069 	/*
   2070 	 * In addition, i82544 and later support I/O mapped indirect
   2071 	 * register access.  It is not desirable (nor supported in
   2072 	 * this driver) to use it for normal operation, though it is
   2073 	 * required to work around bugs in some chip versions.
   2074 	 */
   2075 	switch (sc->sc_type) {
   2076 	case WM_T_82544:
   2077 	case WM_T_82541:
   2078 	case WM_T_82541_2:
   2079 	case WM_T_82547:
   2080 	case WM_T_82547_2:
   2081 		/* First we have to find the I/O BAR. */
   2082 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2083 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2084 			if (memtype == PCI_MAPREG_TYPE_IO)
   2085 				break;
   2086 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2087 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2088 				i += 4;	/* skip high bits, too */
   2089 		}
   2090 		if (i < PCI_MAPREG_END) {
   2091 			/*
    2092 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2093 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2094 			 * That is no problem, because the newer chips
    2095 			 * don't have this bug.
    2096 			 *
    2097 			 * The i8254x apparently doesn't respond when the
    2098 			 * I/O BAR is 0, which looks as if it hasn't
    2099 			 * been configured.
   2100 			 */
   2101 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2102 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2103 				aprint_error_dev(sc->sc_dev,
   2104 				    "WARNING: I/O BAR at zero.\n");
   2105 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2106 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2107 			    == 0) {
   2108 				sc->sc_flags |= WM_F_IOH_VALID;
   2109 			} else
   2110 				aprint_error_dev(sc->sc_dev,
   2111 				    "WARNING: unable to map I/O space\n");
   2112 		}
   2113 		break;
   2114 	default:
   2115 		break;
   2116 	}
   2117 
   2118 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2119 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2120 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2121 	if (sc->sc_type < WM_T_82542_2_1)
   2122 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2123 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2124 
   2125 	/* Power up chip */
   2126 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2127 	    && error != EOPNOTSUPP) {
   2128 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2129 		return;
   2130 	}
   2131 
   2132 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2133 	/*
    2134 	 * Don't use MSI-X if we can use only one queue, to save
    2135 	 * interrupt resources.
   2136 	 */
   2137 	if (sc->sc_nqueues > 1) {
   2138 		max_type = PCI_INTR_TYPE_MSIX;
   2139 		/*
    2140 		 * The 82583 has an MSI-X capability in the PCI configuration
    2141 		 * space, but it doesn't actually support it; at least the
    2142 		 * documentation doesn't say anything about MSI-X.
   2143 		 */
   2144 		counts[PCI_INTR_TYPE_MSIX]
   2145 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2146 	} else {
   2147 		max_type = PCI_INTR_TYPE_MSI;
   2148 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2149 	}
   2150 
   2151 	/* Allocation settings */
   2152 	counts[PCI_INTR_TYPE_MSI] = 1;
   2153 	counts[PCI_INTR_TYPE_INTX] = 1;
    2154 	/* Overridden by the disable flags */
   2155 	if (wm_disable_msi != 0) {
   2156 		counts[PCI_INTR_TYPE_MSI] = 0;
   2157 		if (wm_disable_msix != 0) {
   2158 			max_type = PCI_INTR_TYPE_INTX;
   2159 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2160 		}
   2161 	} else if (wm_disable_msix != 0) {
   2162 		max_type = PCI_INTR_TYPE_MSI;
   2163 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2164 	}
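
	/*
	 * Illustrative summary of the initial allocation settings above:
	 *	neither flag set:	MSI-X -> MSI -> INTx
	 *	wm_disable_msi set:	MSI-X -> INTx
	 *	wm_disable_msix set:	MSI -> INTx
	 *	both flags set:		INTx only
	 * (MSI-X is only requested when more than one queue is usable;
	 * the alloc_retry ladder below may still fall back further.)
	 */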
   2165 
   2166 alloc_retry:
   2167 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2168 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2169 		return;
   2170 	}
   2171 
   2172 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2173 		error = wm_setup_msix(sc);
   2174 		if (error) {
   2175 			pci_intr_release(pc, sc->sc_intrs,
   2176 			    counts[PCI_INTR_TYPE_MSIX]);
   2177 
   2178 			/* Setup for MSI: Disable MSI-X */
   2179 			max_type = PCI_INTR_TYPE_MSI;
   2180 			counts[PCI_INTR_TYPE_MSI] = 1;
   2181 			counts[PCI_INTR_TYPE_INTX] = 1;
   2182 			goto alloc_retry;
   2183 		}
   2184 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2185 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2186 		error = wm_setup_legacy(sc);
   2187 		if (error) {
   2188 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2189 			    counts[PCI_INTR_TYPE_MSI]);
   2190 
   2191 			/* The next try is for INTx: Disable MSI */
   2192 			max_type = PCI_INTR_TYPE_INTX;
   2193 			counts[PCI_INTR_TYPE_INTX] = 1;
   2194 			goto alloc_retry;
   2195 		}
   2196 	} else {
   2197 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2198 		error = wm_setup_legacy(sc);
   2199 		if (error) {
   2200 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2201 			    counts[PCI_INTR_TYPE_INTX]);
   2202 			return;
   2203 		}
   2204 	}
   2205 
   2206 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2207 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2208 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2209 	    WQ_PERCPU | WQ_MPSAFE);
   2210 	if (error) {
   2211 		aprint_error_dev(sc->sc_dev,
   2212 		    "unable to create TxRx workqueue\n");
   2213 		goto out;
   2214 	}
   2215 
   2216 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2217 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2218 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2219 	    WQ_MPSAFE);
   2220 	if (error) {
   2221 		workqueue_destroy(sc->sc_queue_wq);
   2222 		aprint_error_dev(sc->sc_dev,
   2223 		    "unable to create reset workqueue\n");
   2224 		goto out;
   2225 	}
   2226 
   2227 	/*
   2228 	 * Check the function ID (unit number of the chip).
   2229 	 */
   2230 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2231 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2232 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2233 	    || (sc->sc_type == WM_T_82580)
   2234 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2235 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2236 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2237 	else
   2238 		sc->sc_funcid = 0;
   2239 
   2240 	/*
   2241 	 * Determine a few things about the bus we're connected to.
   2242 	 */
   2243 	if (sc->sc_type < WM_T_82543) {
   2244 		/* We don't really know the bus characteristics here. */
   2245 		sc->sc_bus_speed = 33;
   2246 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2247 		/*
   2248 		 * CSA (Communication Streaming Architecture) is about as fast
    2249 		 * as a 32-bit, 66MHz PCI bus.
   2250 		 */
   2251 		sc->sc_flags |= WM_F_CSA;
   2252 		sc->sc_bus_speed = 66;
   2253 		aprint_verbose_dev(sc->sc_dev,
   2254 		    "Communication Streaming Architecture\n");
   2255 		if (sc->sc_type == WM_T_82547) {
   2256 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2257 			callout_setfunc(&sc->sc_txfifo_ch,
   2258 			    wm_82547_txfifo_stall, sc);
   2259 			aprint_verbose_dev(sc->sc_dev,
   2260 			    "using 82547 Tx FIFO stall work-around\n");
   2261 		}
   2262 	} else if (sc->sc_type >= WM_T_82571) {
   2263 		sc->sc_flags |= WM_F_PCIE;
   2264 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2265 		    && (sc->sc_type != WM_T_ICH10)
   2266 		    && (sc->sc_type != WM_T_PCH)
   2267 		    && (sc->sc_type != WM_T_PCH2)
   2268 		    && (sc->sc_type != WM_T_PCH_LPT)
   2269 		    && (sc->sc_type != WM_T_PCH_SPT)
   2270 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2271 			/* ICH* and PCH* have no PCIe capability registers */
   2272 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2273 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2274 				NULL) == 0)
   2275 				aprint_error_dev(sc->sc_dev,
   2276 				    "unable to find PCIe capability\n");
   2277 		}
   2278 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2279 	} else {
   2280 		reg = CSR_READ(sc, WMREG_STATUS);
   2281 		if (reg & STATUS_BUS64)
   2282 			sc->sc_flags |= WM_F_BUS64;
   2283 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2284 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2285 
   2286 			sc->sc_flags |= WM_F_PCIX;
   2287 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2288 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2289 				aprint_error_dev(sc->sc_dev,
   2290 				    "unable to find PCIX capability\n");
   2291 			else if (sc->sc_type != WM_T_82545_3 &&
   2292 				 sc->sc_type != WM_T_82546_3) {
   2293 				/*
   2294 				 * Work around a problem caused by the BIOS
   2295 				 * setting the max memory read byte count
   2296 				 * incorrectly.
   2297 				 */
   2298 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2299 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2300 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2301 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2302 
   2303 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2304 				    PCIX_CMD_BYTECNT_SHIFT;
   2305 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2306 				    PCIX_STATUS_MAXB_SHIFT;
   2307 				if (bytecnt > maxb) {
   2308 					aprint_verbose_dev(sc->sc_dev,
   2309 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2310 					    512 << bytecnt, 512 << maxb);
   2311 					pcix_cmd = (pcix_cmd &
   2312 					    ~PCIX_CMD_BYTECNT_MASK) |
   2313 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2314 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2315 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2316 					    pcix_cmd);
   2317 				}
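				/*
				 * Illustrative: MMRBC is encoded as a
				 * power of two, 512 << bytecnt, so e.g.
				 * bytecnt == 3 (4096 bytes) with
				 * maxb == 2 (2048 bytes) is clamped to
				 * 2048 by the write above.
				 */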
   2318 			}
   2319 		}
   2320 		/*
   2321 		 * The quad port adapter is special; it has a PCIX-PCIX
   2322 		 * bridge on the board, and can run the secondary bus at
   2323 		 * a higher speed.
   2324 		 */
   2325 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2326 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2327 								      : 66;
   2328 		} else if (sc->sc_flags & WM_F_PCIX) {
   2329 			switch (reg & STATUS_PCIXSPD_MASK) {
   2330 			case STATUS_PCIXSPD_50_66:
   2331 				sc->sc_bus_speed = 66;
   2332 				break;
   2333 			case STATUS_PCIXSPD_66_100:
   2334 				sc->sc_bus_speed = 100;
   2335 				break;
   2336 			case STATUS_PCIXSPD_100_133:
   2337 				sc->sc_bus_speed = 133;
   2338 				break;
   2339 			default:
   2340 				aprint_error_dev(sc->sc_dev,
   2341 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2342 				    reg & STATUS_PCIXSPD_MASK);
   2343 				sc->sc_bus_speed = 66;
   2344 				break;
   2345 			}
   2346 		} else
   2347 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2348 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2349 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2350 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2351 	}
   2352 
    2353 	/* Clear interesting stat counters */
   2354 	CSR_READ(sc, WMREG_COLC);
   2355 	CSR_READ(sc, WMREG_RXERRC);
   2356 
   2357 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2358 	    || (sc->sc_type >= WM_T_ICH8))
   2359 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2360 	if (sc->sc_type >= WM_T_ICH8)
   2361 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2362 
   2363 	/* Set PHY, NVM mutex related stuff */
   2364 	switch (sc->sc_type) {
   2365 	case WM_T_82542_2_0:
   2366 	case WM_T_82542_2_1:
   2367 	case WM_T_82543:
   2368 	case WM_T_82544:
   2369 		/* Microwire */
   2370 		sc->nvm.read = wm_nvm_read_uwire;
   2371 		sc->sc_nvm_wordsize = 64;
   2372 		sc->sc_nvm_addrbits = 6;
   2373 		break;
   2374 	case WM_T_82540:
   2375 	case WM_T_82545:
   2376 	case WM_T_82545_3:
   2377 	case WM_T_82546:
   2378 	case WM_T_82546_3:
   2379 		/* Microwire */
   2380 		sc->nvm.read = wm_nvm_read_uwire;
   2381 		reg = CSR_READ(sc, WMREG_EECD);
   2382 		if (reg & EECD_EE_SIZE) {
   2383 			sc->sc_nvm_wordsize = 256;
   2384 			sc->sc_nvm_addrbits = 8;
   2385 		} else {
   2386 			sc->sc_nvm_wordsize = 64;
   2387 			sc->sc_nvm_addrbits = 6;
   2388 		}
   2389 		sc->sc_flags |= WM_F_LOCK_EECD;
   2390 		sc->nvm.acquire = wm_get_eecd;
   2391 		sc->nvm.release = wm_put_eecd;
   2392 		break;
   2393 	case WM_T_82541:
   2394 	case WM_T_82541_2:
   2395 	case WM_T_82547:
   2396 	case WM_T_82547_2:
   2397 		reg = CSR_READ(sc, WMREG_EECD);
   2398 		/*
    2399 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on the
    2400 		 * 8254[17], so set the flags and functions before calling it.
   2401 		 */
   2402 		sc->sc_flags |= WM_F_LOCK_EECD;
   2403 		sc->nvm.acquire = wm_get_eecd;
   2404 		sc->nvm.release = wm_put_eecd;
   2405 		if (reg & EECD_EE_TYPE) {
   2406 			/* SPI */
   2407 			sc->nvm.read = wm_nvm_read_spi;
   2408 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2409 			wm_nvm_set_addrbits_size_eecd(sc);
   2410 		} else {
   2411 			/* Microwire */
   2412 			sc->nvm.read = wm_nvm_read_uwire;
   2413 			if ((reg & EECD_EE_ABITS) != 0) {
   2414 				sc->sc_nvm_wordsize = 256;
   2415 				sc->sc_nvm_addrbits = 8;
   2416 			} else {
   2417 				sc->sc_nvm_wordsize = 64;
   2418 				sc->sc_nvm_addrbits = 6;
   2419 			}
   2420 		}
   2421 		break;
   2422 	case WM_T_82571:
   2423 	case WM_T_82572:
   2424 		/* SPI */
   2425 		sc->nvm.read = wm_nvm_read_eerd;
    2426 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2427 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2428 		wm_nvm_set_addrbits_size_eecd(sc);
   2429 		sc->phy.acquire = wm_get_swsm_semaphore;
   2430 		sc->phy.release = wm_put_swsm_semaphore;
   2431 		sc->nvm.acquire = wm_get_nvm_82571;
   2432 		sc->nvm.release = wm_put_nvm_82571;
   2433 		break;
   2434 	case WM_T_82573:
   2435 	case WM_T_82574:
   2436 	case WM_T_82583:
   2437 		sc->nvm.read = wm_nvm_read_eerd;
    2438 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2439 		if (sc->sc_type == WM_T_82573) {
   2440 			sc->phy.acquire = wm_get_swsm_semaphore;
   2441 			sc->phy.release = wm_put_swsm_semaphore;
   2442 			sc->nvm.acquire = wm_get_nvm_82571;
   2443 			sc->nvm.release = wm_put_nvm_82571;
   2444 		} else {
   2445 			/* Both PHY and NVM use the same semaphore. */
   2446 			sc->phy.acquire = sc->nvm.acquire
   2447 			    = wm_get_swfwhw_semaphore;
   2448 			sc->phy.release = sc->nvm.release
   2449 			    = wm_put_swfwhw_semaphore;
   2450 		}
   2451 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2452 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2453 			sc->sc_nvm_wordsize = 2048;
   2454 		} else {
   2455 			/* SPI */
   2456 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2457 			wm_nvm_set_addrbits_size_eecd(sc);
   2458 		}
   2459 		break;
   2460 	case WM_T_82575:
   2461 	case WM_T_82576:
   2462 	case WM_T_82580:
   2463 	case WM_T_I350:
   2464 	case WM_T_I354:
   2465 	case WM_T_80003:
   2466 		/* SPI */
   2467 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2468 		wm_nvm_set_addrbits_size_eecd(sc);
   2469 		if ((sc->sc_type == WM_T_80003)
   2470 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2471 			sc->nvm.read = wm_nvm_read_eerd;
   2472 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2473 		} else {
   2474 			sc->nvm.read = wm_nvm_read_spi;
   2475 			sc->sc_flags |= WM_F_LOCK_EECD;
   2476 		}
   2477 		sc->phy.acquire = wm_get_phy_82575;
   2478 		sc->phy.release = wm_put_phy_82575;
   2479 		sc->nvm.acquire = wm_get_nvm_80003;
   2480 		sc->nvm.release = wm_put_nvm_80003;
   2481 		break;
   2482 	case WM_T_ICH8:
   2483 	case WM_T_ICH9:
   2484 	case WM_T_ICH10:
   2485 	case WM_T_PCH:
   2486 	case WM_T_PCH2:
   2487 	case WM_T_PCH_LPT:
   2488 		sc->nvm.read = wm_nvm_read_ich8;
   2489 		/* FLASH */
   2490 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2491 		sc->sc_nvm_wordsize = 2048;
   2492 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2493 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2494 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2495 			aprint_error_dev(sc->sc_dev,
   2496 			    "can't map FLASH registers\n");
   2497 			goto out;
   2498 		}
   2499 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2500 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2501 		    ICH_FLASH_SECTOR_SIZE;
   2502 		sc->sc_ich8_flash_bank_size =
   2503 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2504 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2505 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2506 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
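		/*
		 * Illustrative reading of the computation above: GFPREG
		 * describes, in ICH_FLASH_SECTOR_SIZE units, the region
		 * that holds both NVM banks, so the byte count is divided
		 * by 2 for the two banks and by sizeof(uint16_t) to give
		 * the per-bank size in 16-bit words.
		 */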
   2507 		sc->sc_flashreg_offset = 0;
   2508 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2509 		sc->phy.release = wm_put_swflag_ich8lan;
   2510 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2511 		sc->nvm.release = wm_put_nvm_ich8lan;
   2512 		break;
   2513 	case WM_T_PCH_SPT:
   2514 	case WM_T_PCH_CNP:
   2515 		sc->nvm.read = wm_nvm_read_spt;
   2516 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2517 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2518 		sc->sc_flasht = sc->sc_st;
   2519 		sc->sc_flashh = sc->sc_sh;
   2520 		sc->sc_ich8_flash_base = 0;
   2521 		sc->sc_nvm_wordsize =
   2522 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2523 		    * NVM_SIZE_MULTIPLIER;
    2524 		/* It is the size in bytes; we want words */
   2525 		sc->sc_nvm_wordsize /= 2;
   2526 		/* Assume 2 banks */
   2527 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2528 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2529 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2530 		sc->phy.release = wm_put_swflag_ich8lan;
   2531 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2532 		sc->nvm.release = wm_put_nvm_ich8lan;
   2533 		break;
   2534 	case WM_T_I210:
   2535 	case WM_T_I211:
    2536 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2537 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2538 		if (wm_nvm_flash_presence_i210(sc)) {
   2539 			sc->nvm.read = wm_nvm_read_eerd;
   2540 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2541 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2542 			wm_nvm_set_addrbits_size_eecd(sc);
   2543 		} else {
   2544 			sc->nvm.read = wm_nvm_read_invm;
   2545 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2546 			sc->sc_nvm_wordsize = INVM_SIZE;
   2547 		}
   2548 		sc->phy.acquire = wm_get_phy_82575;
   2549 		sc->phy.release = wm_put_phy_82575;
   2550 		sc->nvm.acquire = wm_get_nvm_80003;
   2551 		sc->nvm.release = wm_put_nvm_80003;
   2552 		break;
   2553 	default:
   2554 		break;
   2555 	}
   2556 
   2557 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2558 	switch (sc->sc_type) {
   2559 	case WM_T_82571:
   2560 	case WM_T_82572:
   2561 		reg = CSR_READ(sc, WMREG_SWSM2);
   2562 		if ((reg & SWSM2_LOCK) == 0) {
   2563 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2564 			force_clear_smbi = true;
   2565 		} else
   2566 			force_clear_smbi = false;
   2567 		break;
   2568 	case WM_T_82573:
   2569 	case WM_T_82574:
   2570 	case WM_T_82583:
   2571 		force_clear_smbi = true;
   2572 		break;
   2573 	default:
   2574 		force_clear_smbi = false;
   2575 		break;
   2576 	}
   2577 	if (force_clear_smbi) {
   2578 		reg = CSR_READ(sc, WMREG_SWSM);
   2579 		if ((reg & SWSM_SMBI) != 0)
   2580 			aprint_error_dev(sc->sc_dev,
   2581 			    "Please update the Bootagent\n");
   2582 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2583 	}
   2584 
   2585 	/*
    2586 	 * Defer printing the EEPROM type until after verifying the checksum.
   2587 	 * This allows the EEPROM type to be printed correctly in the case
   2588 	 * that no EEPROM is attached.
   2589 	 */
   2590 	/*
   2591 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2592 	 * this for later, so we can fail future reads from the EEPROM.
   2593 	 */
   2594 	if (wm_nvm_validate_checksum(sc)) {
   2595 		/*
    2596 		 * Try the read again, because some PCI-e parts fail the
    2597 		 * first check due to the link being in a sleep state.
   2598 		 */
   2599 		if (wm_nvm_validate_checksum(sc))
   2600 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2601 	}
   2602 
   2603 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2604 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2605 	else {
   2606 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2607 		    sc->sc_nvm_wordsize);
   2608 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2609 			aprint_verbose("iNVM");
   2610 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2611 			aprint_verbose("FLASH(HW)");
   2612 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2613 			aprint_verbose("FLASH");
   2614 		else {
   2615 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2616 				eetype = "SPI";
   2617 			else
   2618 				eetype = "MicroWire";
   2619 			aprint_verbose("(%d address bits) %s EEPROM",
   2620 			    sc->sc_nvm_addrbits, eetype);
   2621 		}
   2622 	}
   2623 	wm_nvm_version(sc);
   2624 	aprint_verbose("\n");
   2625 
   2626 	/*
    2627 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2628 	 * might be incorrect.
   2629 	 */
   2630 	wm_gmii_setup_phytype(sc, 0, 0);
   2631 
   2632 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2633 	switch (sc->sc_type) {
   2634 	case WM_T_ICH8:
   2635 	case WM_T_ICH9:
   2636 	case WM_T_ICH10:
   2637 	case WM_T_PCH:
   2638 	case WM_T_PCH2:
   2639 	case WM_T_PCH_LPT:
   2640 	case WM_T_PCH_SPT:
   2641 	case WM_T_PCH_CNP:
   2642 		apme_mask = WUC_APME;
   2643 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2644 		if ((eeprom_data & apme_mask) != 0)
   2645 			sc->sc_flags |= WM_F_WOL;
   2646 		break;
   2647 	default:
   2648 		break;
   2649 	}
   2650 
   2651 	/* Reset the chip to a known state. */
   2652 	wm_reset(sc);
   2653 
   2654 	/*
   2655 	 * Check for I21[01] PLL workaround.
   2656 	 *
   2657 	 * Three cases:
   2658 	 * a) Chip is I211.
   2659 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2660 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2661 	 */
   2662 	if (sc->sc_type == WM_T_I211)
   2663 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2664 	if (sc->sc_type == WM_T_I210) {
   2665 		if (!wm_nvm_flash_presence_i210(sc))
   2666 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2667 		else if ((sc->sc_nvm_ver_major < 3)
   2668 		    || ((sc->sc_nvm_ver_major == 3)
   2669 			&& (sc->sc_nvm_ver_minor < 25))) {
   2670 			aprint_verbose_dev(sc->sc_dev,
   2671 			    "ROM image version %d.%d is older than 3.25\n",
   2672 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2673 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2674 		}
   2675 	}
   2676 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2677 		wm_pll_workaround_i210(sc);
   2678 
   2679 	wm_get_wakeup(sc);
   2680 
   2681 	/* Non-AMT based hardware can now take control from firmware */
   2682 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2683 		wm_get_hw_control(sc);
   2684 
   2685 	/*
    2686 	 * Read the Ethernet address from device properties if present;
    2687 	 * otherwise read it from the EEPROM.
   2688 	 */
   2689 	ea = prop_dictionary_get(dict, "mac-address");
   2690 	if (ea != NULL) {
   2691 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2692 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2693 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2694 	} else {
   2695 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2696 			aprint_error_dev(sc->sc_dev,
   2697 			    "unable to read Ethernet address\n");
   2698 			goto out;
   2699 		}
   2700 	}
   2701 
   2702 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2703 	    ether_sprintf(enaddr));
   2704 
   2705 	/*
   2706 	 * Read the config info from the EEPROM, and set up various
   2707 	 * bits in the control registers based on their contents.
   2708 	 */
   2709 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2710 	if (pn != NULL) {
   2711 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2712 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2713 	} else {
   2714 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2715 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2716 			goto out;
   2717 		}
   2718 	}
   2719 
   2720 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2721 	if (pn != NULL) {
   2722 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2723 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2724 	} else {
   2725 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2726 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2727 			goto out;
   2728 		}
   2729 	}
   2730 
    2731 	/* Check for WM_F_WOL */
   2732 	switch (sc->sc_type) {
   2733 	case WM_T_82542_2_0:
   2734 	case WM_T_82542_2_1:
   2735 	case WM_T_82543:
   2736 		/* dummy? */
   2737 		eeprom_data = 0;
   2738 		apme_mask = NVM_CFG3_APME;
   2739 		break;
   2740 	case WM_T_82544:
   2741 		apme_mask = NVM_CFG2_82544_APM_EN;
   2742 		eeprom_data = cfg2;
   2743 		break;
   2744 	case WM_T_82546:
   2745 	case WM_T_82546_3:
   2746 	case WM_T_82571:
   2747 	case WM_T_82572:
   2748 	case WM_T_82573:
   2749 	case WM_T_82574:
   2750 	case WM_T_82583:
   2751 	case WM_T_80003:
   2752 	case WM_T_82575:
   2753 	case WM_T_82576:
   2754 		apme_mask = NVM_CFG3_APME;
   2755 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2756 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2757 		break;
   2758 	case WM_T_82580:
   2759 	case WM_T_I350:
   2760 	case WM_T_I354:
   2761 	case WM_T_I210:
   2762 	case WM_T_I211:
   2763 		apme_mask = NVM_CFG3_APME;
   2764 		wm_nvm_read(sc,
   2765 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2766 		    1, &eeprom_data);
   2767 		break;
   2768 	case WM_T_ICH8:
   2769 	case WM_T_ICH9:
   2770 	case WM_T_ICH10:
   2771 	case WM_T_PCH:
   2772 	case WM_T_PCH2:
   2773 	case WM_T_PCH_LPT:
   2774 	case WM_T_PCH_SPT:
   2775 	case WM_T_PCH_CNP:
    2776 		/* Already checked before wm_reset() */
   2777 		apme_mask = eeprom_data = 0;
   2778 		break;
   2779 	default: /* XXX 82540 */
   2780 		apme_mask = NVM_CFG3_APME;
   2781 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2782 		break;
   2783 	}
    2784 	/* Set the WM_F_WOL flag if the APME bit gathered above is set */
   2785 	if ((eeprom_data & apme_mask) != 0)
   2786 		sc->sc_flags |= WM_F_WOL;
   2787 
   2788 	/*
    2789 	 * We have the EEPROM settings; now apply the special cases
    2790 	 * where the EEPROM may be wrong or the board doesn't support
    2791 	 * wake-on-LAN on a particular port.
   2792 	 */
   2793 	switch (sc->sc_pcidevid) {
   2794 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2795 		sc->sc_flags &= ~WM_F_WOL;
   2796 		break;
   2797 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2798 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2799 		/* Wake events are only supported on port A for dual fiber,
    2800 		 * regardless of the EEPROM setting. */
   2801 		if (sc->sc_funcid == 1)
   2802 			sc->sc_flags &= ~WM_F_WOL;
   2803 		break;
   2804 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2805 		/* If quad port adapter, disable WoL on all but port A */
   2806 		if (sc->sc_funcid != 0)
   2807 			sc->sc_flags &= ~WM_F_WOL;
   2808 		break;
   2809 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2810 		/* Wake events are only supported on port A for dual fiber,
    2811 		 * regardless of the EEPROM setting. */
   2812 		if (sc->sc_funcid == 1)
   2813 			sc->sc_flags &= ~WM_F_WOL;
   2814 		break;
   2815 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2817 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2818 		/* If quad port adapter, disable WoL on all but port A */
   2819 		if (sc->sc_funcid != 0)
   2820 			sc->sc_flags &= ~WM_F_WOL;
   2821 		break;
   2822 	}
   2823 
   2824 	if (sc->sc_type >= WM_T_82575) {
   2825 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2826 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2827 			    nvmword);
   2828 			if ((sc->sc_type == WM_T_82575) ||
   2829 			    (sc->sc_type == WM_T_82576)) {
   2830 				/* Check NVM for autonegotiation */
   2831 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2832 				    != 0)
   2833 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2834 			}
   2835 			if ((sc->sc_type == WM_T_82575) ||
   2836 			    (sc->sc_type == WM_T_I350)) {
   2837 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2838 					sc->sc_flags |= WM_F_MAS;
   2839 			}
   2840 		}
   2841 	}
   2842 
   2843 	/*
    2844 	 * XXX Some multi-port cards need special handling to disable
    2845 	 * a particular port.
   2846 	 */
   2847 
   2848 	if (sc->sc_type >= WM_T_82544) {
   2849 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2850 		if (pn != NULL) {
   2851 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2852 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2853 		} else {
   2854 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2855 				aprint_error_dev(sc->sc_dev,
   2856 				    "unable to read SWDPIN\n");
   2857 				goto out;
   2858 			}
   2859 		}
   2860 	}
   2861 
   2862 	if (cfg1 & NVM_CFG1_ILOS)
   2863 		sc->sc_ctrl |= CTRL_ILOS;
   2864 
   2865 	/*
   2866 	 * XXX
    2867 	 * This code isn't correct because pins 2 and 3 are located
    2868 	 * at different positions on newer chips. Check all datasheets.
    2869 	 *
    2870 	 * Until this is resolved, apply it only to chips up to the 82580.
   2871 	 */
   2872 	if (sc->sc_type <= WM_T_82580) {
   2873 		if (sc->sc_type >= WM_T_82544) {
   2874 			sc->sc_ctrl |=
   2875 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2876 			    CTRL_SWDPIO_SHIFT;
   2877 			sc->sc_ctrl |=
   2878 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2879 			    CTRL_SWDPINS_SHIFT;
   2880 		} else {
   2881 			sc->sc_ctrl |=
   2882 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2883 			    CTRL_SWDPIO_SHIFT;
   2884 		}
   2885 	}
   2886 
   2887 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2888 		wm_nvm_read(sc,
   2889 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2890 		    1, &nvmword);
   2891 		if (nvmword & NVM_CFG3_ILOS)
   2892 			sc->sc_ctrl |= CTRL_ILOS;
   2893 	}
   2894 
   2895 #if 0
   2896 	if (sc->sc_type >= WM_T_82544) {
   2897 		if (cfg1 & NVM_CFG1_IPS0)
   2898 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2899 		if (cfg1 & NVM_CFG1_IPS1)
   2900 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2901 		sc->sc_ctrl_ext |=
   2902 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2903 		    CTRL_EXT_SWDPIO_SHIFT;
   2904 		sc->sc_ctrl_ext |=
   2905 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2906 		    CTRL_EXT_SWDPINS_SHIFT;
   2907 	} else {
   2908 		sc->sc_ctrl_ext |=
   2909 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2910 		    CTRL_EXT_SWDPIO_SHIFT;
   2911 	}
   2912 #endif
   2913 
   2914 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2915 #if 0
   2916 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2917 #endif
   2918 
   2919 	if (sc->sc_type == WM_T_PCH) {
   2920 		uint16_t val;
   2921 
   2922 		/* Save the NVM K1 bit setting */
   2923 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2924 
   2925 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2926 			sc->sc_nvm_k1_enabled = 1;
   2927 		else
   2928 			sc->sc_nvm_k1_enabled = 0;
   2929 	}
   2930 
    2931 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2932 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2933 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2934 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2935 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2936 	    || sc->sc_type == WM_T_82573
   2937 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2938 		/* Copper only */
   2939 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2940 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2941 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2942 	    || (sc->sc_type == WM_T_I211)) {
   2943 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2944 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
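         		/*
         		 * CTRL_EXT's link mode field selects the MAC/PHY
         		 * interface: GMII/MII (copper), 1000BASE-KX, SGMII or
         		 * SerDes/SFP, matching the cases below.
         		 */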
   2945 		switch (link_mode) {
   2946 		case CTRL_EXT_LINK_MODE_1000KX:
   2947 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2948 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2949 			break;
   2950 		case CTRL_EXT_LINK_MODE_SGMII:
   2951 			if (wm_sgmii_uses_mdio(sc)) {
   2952 				aprint_normal_dev(sc->sc_dev,
   2953 				    "SGMII(MDIO)\n");
   2954 				sc->sc_flags |= WM_F_SGMII;
   2955 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2956 				break;
   2957 			}
   2958 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2959 			/*FALLTHROUGH*/
   2960 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2961 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2962 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2963 				if (link_mode
   2964 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2965 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2966 					sc->sc_flags |= WM_F_SGMII;
   2967 					aprint_verbose_dev(sc->sc_dev,
   2968 					    "SGMII\n");
   2969 				} else {
   2970 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2971 					aprint_verbose_dev(sc->sc_dev,
   2972 					    "SERDES\n");
   2973 				}
   2974 				break;
   2975 			}
   2976 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2977 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2978 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2979 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2980 				sc->sc_flags |= WM_F_SGMII;
   2981 			}
   2982 			/* Do not change link mode for 100BaseFX */
   2983 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2984 				break;
   2985 
   2986 			/* Change current link mode setting */
   2987 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2988 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2989 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2990 			else
   2991 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2992 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2993 			break;
   2994 		case CTRL_EXT_LINK_MODE_GMII:
   2995 		default:
   2996 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2997 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2998 			break;
   2999 		}
   3000 
    3001 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3002 			reg |= CTRL_EXT_I2C_ENA;
    3003 		else
    3004 			reg &= ~CTRL_EXT_I2C_ENA;
   3006 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3007 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3008 			if (!wm_sgmii_uses_mdio(sc))
   3009 				wm_gmii_setup_phytype(sc, 0, 0);
   3010 			wm_reset_mdicnfg_82580(sc);
   3011 		}
   3012 	} else if (sc->sc_type < WM_T_82543 ||
   3013 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3014 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3015 			aprint_error_dev(sc->sc_dev,
   3016 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3017 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3018 		}
   3019 	} else {
   3020 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3021 			aprint_error_dev(sc->sc_dev,
   3022 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3023 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3024 		}
   3025 	}
   3026 
   3027 	if (sc->sc_type >= WM_T_PCH2)
   3028 		sc->sc_flags |= WM_F_EEE;
   3029 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3030 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
    3031 		/* XXX: I354 needs special handling (not yet implemented). */
   3032 		if (sc->sc_type != WM_T_I354)
   3033 			sc->sc_flags |= WM_F_EEE;
   3034 	}
   3035 
   3036 	/*
   3037 	 * The I350 has a bug where it always strips the CRC whether
    3038 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   3039 	 */
   3040 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3041 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3042 		sc->sc_flags |= WM_F_CRC_STRIP;
   3043 
   3044 	/* Set device properties (macflags) */
   3045 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3046 
   3047 	if (sc->sc_flags != 0) {
   3048 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3049 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3050 	}
   3051 
   3052 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3053 
   3054 	/* Initialize the media structures accordingly. */
   3055 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3056 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3057 	else
   3058 		wm_tbi_mediainit(sc); /* All others */
   3059 
   3060 	ifp = &sc->sc_ethercom.ec_if;
   3061 	xname = device_xname(sc->sc_dev);
   3062 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3063 	ifp->if_softc = sc;
   3064 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3065 	ifp->if_extflags = IFEF_MPSAFE;
   3066 	ifp->if_ioctl = wm_ioctl;
   3067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3068 		ifp->if_start = wm_nq_start;
   3069 		/*
    3070 		 * When there is only one CPU and the controller can use
    3071 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3072 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3073 		 * other for link status changes. In this situation,
    3074 		 * wm_nq_transmit() is disadvantageous because of the
    3075 		 * wm_select_txqueue() and pcq(9) overhead.
   3076 		 */
   3077 		if (wm_is_using_multiqueue(sc))
   3078 			ifp->if_transmit = wm_nq_transmit;
   3079 	} else {
   3080 		ifp->if_start = wm_start;
   3081 		/*
   3082 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3083 		 * described above.
   3084 		 */
   3085 		if (wm_is_using_multiqueue(sc))
   3086 			ifp->if_transmit = wm_transmit;
   3087 	}
    3088 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3089 	ifp->if_init = wm_init;
   3090 	ifp->if_stop = wm_stop;
   3091 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3092 	IFQ_SET_READY(&ifp->if_snd);
   3093 
    3094 	/* Check for jumbo frame support */
   3095 	switch (sc->sc_type) {
   3096 	case WM_T_82573:
   3097 		/* XXX limited to 9234 if ASPM is disabled */
   3098 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3099 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3100 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3101 		break;
   3102 	case WM_T_82571:
   3103 	case WM_T_82572:
   3104 	case WM_T_82574:
   3105 	case WM_T_82583:
   3106 	case WM_T_82575:
   3107 	case WM_T_82576:
   3108 	case WM_T_82580:
   3109 	case WM_T_I350:
   3110 	case WM_T_I354:
   3111 	case WM_T_I210:
   3112 	case WM_T_I211:
   3113 	case WM_T_80003:
   3114 	case WM_T_ICH9:
   3115 	case WM_T_ICH10:
   3116 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3117 	case WM_T_PCH_LPT:
   3118 	case WM_T_PCH_SPT:
   3119 	case WM_T_PCH_CNP:
   3120 		/* XXX limited to 9234 */
   3121 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3122 		break;
   3123 	case WM_T_PCH:
   3124 		/* XXX limited to 4096 */
   3125 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3126 		break;
   3127 	case WM_T_82542_2_0:
   3128 	case WM_T_82542_2_1:
   3129 	case WM_T_ICH8:
   3130 		/* No support for jumbo frame */
   3131 		break;
   3132 	default:
   3133 		/* ETHER_MAX_LEN_JUMBO */
   3134 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3135 		break;
   3136 	}
   3137 
    3138 	/* If we're an i82543 or greater, we can support VLANs. */
   3139 	if (sc->sc_type >= WM_T_82543) {
   3140 		sc->sc_ethercom.ec_capabilities |=
   3141 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3142 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3143 	}
   3144 
   3145 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3146 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3147 
   3148 	/*
    3149 	 * We can perform TCPv4 and UDPv4 checksum offload, but only
    3150 	 * on i82543 and later.
   3151 	 */
   3152 	if (sc->sc_type >= WM_T_82543) {
   3153 		ifp->if_capabilities |=
   3154 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3155 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3156 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3157 		    IFCAP_CSUM_TCPv6_Tx |
   3158 		    IFCAP_CSUM_UDPv6_Tx;
   3159 	}
   3160 
   3161 	/*
   3162 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3163 	 *
   3164 	 *	82541GI (8086:1076) ... no
   3165 	 *	82572EI (8086:10b9) ... yes
   3166 	 */
   3167 	if (sc->sc_type >= WM_T_82571) {
   3168 		ifp->if_capabilities |=
   3169 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3170 	}
   3171 
   3172 	/*
    3173 	 * If we're an i82544 or greater (except i82547), we can do
   3174 	 * TCP segmentation offload.
   3175 	 */
   3176 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3177 		ifp->if_capabilities |= IFCAP_TSOv4;
   3178 
   3179 	if (sc->sc_type >= WM_T_82571)
   3180 		ifp->if_capabilities |= IFCAP_TSOv6;
   3181 
   3182 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3183 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3184 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3185 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3186 
   3187 	/* Attach the interface. */
   3188 	if_initialize(ifp);
   3189 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3190 	ether_ifattach(ifp, enaddr);
   3191 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3192 	if_register(ifp);
   3193 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3194 	    RND_FLAG_DEFAULT);
   3195 
   3196 #ifdef WM_EVENT_COUNTERS
   3197 	/* Attach event counters. */
   3198 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3199 	    NULL, xname, "linkintr");
   3200 
   3201 	if (sc->sc_type >= WM_T_82542_2_1) {
   3202 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3203 		    NULL, xname, "tx_xoff");
   3204 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3205 		    NULL, xname, "tx_xon");
   3206 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3207 		    NULL, xname, "rx_xoff");
   3208 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3209 		    NULL, xname, "rx_xon");
   3210 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3211 		    NULL, xname, "rx_macctl");
   3212 	}
   3213 
   3214 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3215 	    NULL, xname, "CRC Error");
   3216 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3217 	    NULL, xname, "Symbol Error");
   3218 
   3219 	if (sc->sc_type >= WM_T_82543) {
   3220 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3221 		    NULL, xname, "Alignment Error");
   3222 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3223 		    NULL, xname, "Receive Error");
   3224 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3225 		    NULL, xname, "Carrier Extension Error");
   3226 	}
   3227 
   3228 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3229 	    NULL, xname, "Missed Packets");
   3230 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3231 	    NULL, xname, "Collision");
   3232 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3233 	    NULL, xname, "Sequence Error");
   3234 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3235 	    NULL, xname, "Receive Length Error");
   3236 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Single Collision");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Excessive Collisions");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Multiple Collision");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Late Collisions");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Defer");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Good Packets Rx");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Broadcast Packets Rx");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Multicast Packets Rx");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Good Packets Tx");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Good Octets Rx");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Good Octets Tx");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Rx No Buffers");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3261 	    NULL, xname, "Rx Undersize");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Rx Fragment");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Rx Oversize");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Rx Jabber");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Total Octets Rx");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Total Octets Tx");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Total Packets Rx");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Total Packets Tx");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Multicast Packets Tx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
    3279 	    NULL, xname, "Broadcast Packets Tx");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Packets Rx (64 bytes)");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3287 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3291 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3292 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "Packets Tx (64 bytes)");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3303 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Interrupt Assertion");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3311 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3315 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3322 	if (sc->sc_type >= WM_T_82543) {
   3323 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3324 		    NULL, xname, "Tx with No CRS");
   3325 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3326 		    NULL, xname, "TCP Segmentation Context Tx");
   3327 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3328 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3329 	}
   3330 	if (sc->sc_type >= WM_T_82540) {
   3331 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "Management Packets RX");
   3333 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3334 		    NULL, xname, "Management Packets Dropped");
   3335 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "Management Packets TX");
   3337 	}
   3338 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3339 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3340 		    NULL, xname, "BMC2OS Packets received by host");
   3341 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3342 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3343 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3345 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3346 		    NULL, xname, "OS2BMC Packets received by BMC");
   3347 	}
   3348 #endif /* WM_EVENT_COUNTERS */
   3349 
   3350 	sc->sc_txrx_use_workqueue = false;
   3351 
   3352 	if (wm_phy_need_linkdown_discard(sc)) {
   3353 		DPRINTF(sc, WM_DEBUG_LINK,
   3354 		    ("%s: %s: Set linkdown discard flag\n",
   3355 			device_xname(sc->sc_dev), __func__));
   3356 		wm_set_linkdown_discard(sc);
   3357 	}
   3358 
   3359 	wm_init_sysctls(sc);
   3360 
   3361 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3362 		pmf_class_network_register(self, ifp);
   3363 	else
   3364 		aprint_error_dev(self, "couldn't establish power handler\n");
   3365 
   3366 	sc->sc_flags |= WM_F_ATTACHED;
   3367 out:
   3368 	return;
   3369 }
   3370 
   3371 /* The detach function (ca_detach) */
   3372 static int
   3373 wm_detach(device_t self, int flags __unused)
   3374 {
   3375 	struct wm_softc *sc = device_private(self);
   3376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3377 	int i;
   3378 
   3379 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3380 		return 0;
   3381 
   3382 	/* Stop the interface. Callouts are stopped in it. */
   3383 	IFNET_LOCK(ifp);
   3384 	sc->sc_dying = true;
   3385 	wm_stop(ifp, 1);
   3386 	IFNET_UNLOCK(ifp);
   3387 
   3388 	pmf_device_deregister(self);
   3389 
   3390 	sysctl_teardown(&sc->sc_sysctllog);
   3391 
   3392 #ifdef WM_EVENT_COUNTERS
   3393 	evcnt_detach(&sc->sc_ev_linkintr);
   3394 
   3395 	if (sc->sc_type >= WM_T_82542_2_1) {
   3396 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3397 		evcnt_detach(&sc->sc_ev_tx_xon);
   3398 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3399 		evcnt_detach(&sc->sc_ev_rx_xon);
   3400 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3401 	}
   3402 
   3403 	evcnt_detach(&sc->sc_ev_crcerrs);
   3404 	evcnt_detach(&sc->sc_ev_symerrc);
   3405 
   3406 	if (sc->sc_type >= WM_T_82543) {
   3407 		evcnt_detach(&sc->sc_ev_algnerrc);
   3408 		evcnt_detach(&sc->sc_ev_rxerrc);
   3409 		evcnt_detach(&sc->sc_ev_cexterr);
   3410 	}
   3411 	evcnt_detach(&sc->sc_ev_mpc);
   3412 	evcnt_detach(&sc->sc_ev_colc);
   3413 	evcnt_detach(&sc->sc_ev_sec);
   3414 	evcnt_detach(&sc->sc_ev_rlec);
   3415 	evcnt_detach(&sc->sc_ev_scc);
   3416 	evcnt_detach(&sc->sc_ev_ecol);
   3417 	evcnt_detach(&sc->sc_ev_mcc);
   3418 	evcnt_detach(&sc->sc_ev_latecol);
   3419 	evcnt_detach(&sc->sc_ev_dc);
   3420 	evcnt_detach(&sc->sc_ev_gprc);
   3421 	evcnt_detach(&sc->sc_ev_bprc);
   3422 	evcnt_detach(&sc->sc_ev_mprc);
   3423 	evcnt_detach(&sc->sc_ev_gptc);
   3424 	evcnt_detach(&sc->sc_ev_gorc);
   3425 	evcnt_detach(&sc->sc_ev_gotc);
   3426 	evcnt_detach(&sc->sc_ev_rnbc);
   3427 	evcnt_detach(&sc->sc_ev_ruc);
   3428 	evcnt_detach(&sc->sc_ev_rfc);
   3429 	evcnt_detach(&sc->sc_ev_roc);
   3430 	evcnt_detach(&sc->sc_ev_rjc);
   3431 	evcnt_detach(&sc->sc_ev_tor);
   3432 	evcnt_detach(&sc->sc_ev_tot);
   3433 	evcnt_detach(&sc->sc_ev_tpr);
   3434 	evcnt_detach(&sc->sc_ev_tpt);
   3435 	evcnt_detach(&sc->sc_ev_mptc);
   3436 	evcnt_detach(&sc->sc_ev_bptc);
   3437 	evcnt_detach(&sc->sc_ev_prc64);
   3438 	evcnt_detach(&sc->sc_ev_prc127);
   3439 	evcnt_detach(&sc->sc_ev_prc255);
   3440 	evcnt_detach(&sc->sc_ev_prc511);
   3441 	evcnt_detach(&sc->sc_ev_prc1023);
   3442 	evcnt_detach(&sc->sc_ev_prc1522);
   3443 	evcnt_detach(&sc->sc_ev_ptc64);
   3444 	evcnt_detach(&sc->sc_ev_ptc127);
   3445 	evcnt_detach(&sc->sc_ev_ptc255);
   3446 	evcnt_detach(&sc->sc_ev_ptc511);
   3447 	evcnt_detach(&sc->sc_ev_ptc1023);
   3448 	evcnt_detach(&sc->sc_ev_ptc1522);
   3449 	evcnt_detach(&sc->sc_ev_iac);
   3450 	evcnt_detach(&sc->sc_ev_icrxptc);
   3451 	evcnt_detach(&sc->sc_ev_icrxatc);
   3452 	evcnt_detach(&sc->sc_ev_ictxptc);
   3453 	evcnt_detach(&sc->sc_ev_ictxact);
   3454 	evcnt_detach(&sc->sc_ev_ictxqec);
   3455 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3456 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3457 	evcnt_detach(&sc->sc_ev_icrxoc);
   3458 	if (sc->sc_type >= WM_T_82543) {
   3459 		evcnt_detach(&sc->sc_ev_tncrs);
   3460 		evcnt_detach(&sc->sc_ev_tsctc);
   3461 		evcnt_detach(&sc->sc_ev_tsctfc);
   3462 	}
   3463 	if (sc->sc_type >= WM_T_82540) {
   3464 		evcnt_detach(&sc->sc_ev_mgtprc);
   3465 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3466 		evcnt_detach(&sc->sc_ev_mgtptc);
   3467 	}
   3468 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3469 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3470 		evcnt_detach(&sc->sc_ev_o2bspc);
   3471 		evcnt_detach(&sc->sc_ev_b2ospc);
   3472 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3473 	}
   3474 #endif /* WM_EVENT_COUNTERS */
   3475 
   3476 	rnd_detach_source(&sc->rnd_source);
   3477 
   3478 	/* Tell the firmware about the release */
   3479 	mutex_enter(sc->sc_core_lock);
   3480 	wm_release_manageability(sc);
   3481 	wm_release_hw_control(sc);
   3482 	wm_enable_wakeup(sc);
   3483 	mutex_exit(sc->sc_core_lock);
   3484 
   3485 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3486 
   3487 	ether_ifdetach(ifp);
   3488 	if_detach(ifp);
   3489 	if_percpuq_destroy(sc->sc_ipq);
   3490 
   3491 	/* Delete all remaining media. */
   3492 	ifmedia_fini(&sc->sc_mii.mii_media);
   3493 
   3494 	/* Unload RX dmamaps and free mbufs */
   3495 	for (i = 0; i < sc->sc_nqueues; i++) {
   3496 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3497 		mutex_enter(rxq->rxq_lock);
   3498 		wm_rxdrain(rxq);
   3499 		mutex_exit(rxq->rxq_lock);
   3500 	}
   3501 	/* Must unlock here */
   3502 
   3503 	/* Disestablish the interrupt handler */
   3504 	for (i = 0; i < sc->sc_nintrs; i++) {
   3505 		if (sc->sc_ihs[i] != NULL) {
   3506 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3507 			sc->sc_ihs[i] = NULL;
   3508 		}
   3509 	}
   3510 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3511 
   3512 	/* wm_stop() ensured that the workqueues are stopped. */
   3513 	workqueue_destroy(sc->sc_queue_wq);
   3514 	workqueue_destroy(sc->sc_reset_wq);
   3515 
   3516 	for (i = 0; i < sc->sc_nqueues; i++)
   3517 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3518 
   3519 	wm_free_txrx_queues(sc);
   3520 
   3521 	/* Unmap the registers */
   3522 	if (sc->sc_ss) {
   3523 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3524 		sc->sc_ss = 0;
   3525 	}
   3526 	if (sc->sc_ios) {
   3527 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3528 		sc->sc_ios = 0;
   3529 	}
   3530 	if (sc->sc_flashs) {
   3531 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3532 		sc->sc_flashs = 0;
   3533 	}
   3534 
   3535 	if (sc->sc_core_lock)
   3536 		mutex_obj_free(sc->sc_core_lock);
   3537 	if (sc->sc_ich_phymtx)
   3538 		mutex_obj_free(sc->sc_ich_phymtx);
   3539 	if (sc->sc_ich_nvmmtx)
   3540 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3541 
   3542 	return 0;
   3543 }
   3544 
   3545 static bool
   3546 wm_suspend(device_t self, const pmf_qual_t *qual)
   3547 {
   3548 	struct wm_softc *sc = device_private(self);
   3549 
   3550 	wm_release_manageability(sc);
   3551 	wm_release_hw_control(sc);
   3552 	wm_enable_wakeup(sc);
   3553 
   3554 	return true;
   3555 }
   3556 
   3557 static bool
   3558 wm_resume(device_t self, const pmf_qual_t *qual)
   3559 {
   3560 	struct wm_softc *sc = device_private(self);
   3561 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3562 	pcireg_t reg;
   3563 	char buf[256];
   3564 
   3565 	reg = CSR_READ(sc, WMREG_WUS);
   3566 	if (reg != 0) {
   3567 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3568 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3569 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3570 	}
   3571 
   3572 	if (sc->sc_type >= WM_T_PCH2)
   3573 		wm_resume_workarounds_pchlan(sc);
   3574 	IFNET_LOCK(ifp);
   3575 	if ((ifp->if_flags & IFF_UP) == 0) {
   3576 		/* >= PCH_SPT hardware workaround before reset. */
   3577 		if (sc->sc_type >= WM_T_PCH_SPT)
   3578 			wm_flush_desc_rings(sc);
   3579 
   3580 		wm_reset(sc);
   3581 		/* Non-AMT based hardware can now take control from firmware */
   3582 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3583 			wm_get_hw_control(sc);
   3584 		wm_init_manageability(sc);
   3585 	} else {
   3586 		/*
    3587 		 * We called pmf_class_network_register(), so if_init() is
    3588 		 * called automatically when the interface is IFF_UP.
    3589 		 * wm_reset(), wm_get_hw_control() and wm_init_manageability()
    3590 		 * are then called via wm_init().
   3591 		 */
   3592 	}
   3593 	IFNET_UNLOCK(ifp);
   3594 
   3595 	return true;
   3596 }
   3597 
   3598 /*
   3599  * wm_watchdog:
   3600  *
   3601  *	Watchdog checker.
   3602  */
   3603 static bool
   3604 wm_watchdog(struct ifnet *ifp)
   3605 {
   3606 	int qid;
   3607 	struct wm_softc *sc = ifp->if_softc;
    3608 	uint16_t hang_queue = 0; /* One bit per queue; wm(4)'s max is 16 (82576). */
   3609 
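         	/*
         	 * Collect one bit per hung Tx queue in hang_queue; if any bit
         	 * is set, schedule a single reset via the workqueue. The
         	 * sc_reset_pending swap ensures at most one reset is queued.
         	 */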
   3610 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3611 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3612 
   3613 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3614 	}
   3615 
   3616 #ifdef WM_DEBUG
   3617 	if (sc->sc_trigger_reset) {
   3618 		/* debug operation, no need for atomicity or reliability */
   3619 		sc->sc_trigger_reset = 0;
   3620 		hang_queue++;
   3621 	}
   3622 #endif
   3623 
   3624 	if (hang_queue == 0)
   3625 		return true;
   3626 
   3627 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3628 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3629 
   3630 	return false;
   3631 }
   3632 
   3633 /*
   3634  * Perform an interface watchdog reset.
   3635  */
   3636 static void
   3637 wm_handle_reset_work(struct work *work, void *arg)
   3638 {
   3639 	struct wm_softc * const sc = arg;
   3640 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3641 
   3642 	/* Don't want ioctl operations to happen */
   3643 	IFNET_LOCK(ifp);
   3644 
    3645 	/* Reset the interface. */
   3646 	wm_init(ifp);
   3647 
   3648 	IFNET_UNLOCK(ifp);
   3649 
   3650 	/*
    3651 	 * Some upper-layer processing, e.g. ALTQ or single-CPU systems,
    3652 	 * still calls ifp->if_start() directly.
   3653 	 */
   3654 	/* Try to get more packets going. */
   3655 	ifp->if_start(ifp);
   3656 
   3657 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3658 }
   3659 
   3661 static void
   3662 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3663 {
   3664 
   3665 	mutex_enter(txq->txq_lock);
   3666 	if (txq->txq_sending &&
   3667 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3668 		wm_watchdog_txq_locked(ifp, txq, hang);
   3669 
   3670 	mutex_exit(txq->txq_lock);
   3671 }
   3672 
   3673 static void
   3674 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3675     uint16_t *hang)
   3676 {
   3677 	struct wm_softc *sc = ifp->if_softc;
   3678 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3679 
   3680 	KASSERT(mutex_owned(txq->txq_lock));
   3681 
   3682 	/*
   3683 	 * Since we're using delayed interrupts, sweep up
   3684 	 * before we report an error.
   3685 	 */
   3686 	wm_txeof(txq, UINT_MAX);
   3687 
   3688 	if (txq->txq_sending)
   3689 		*hang |= __BIT(wmq->wmq_id);
   3690 
   3691 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3692 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3693 		    device_xname(sc->sc_dev));
   3694 	} else {
   3695 #ifdef WM_DEBUG
   3696 		int i, j;
   3697 		struct wm_txsoft *txs;
   3698 #endif
   3699 		log(LOG_ERR,
   3700 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3701 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3702 		    txq->txq_next);
   3703 		if_statinc(ifp, if_oerrors);
   3704 #ifdef WM_DEBUG
   3705 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3706 		    i = WM_NEXTTXS(txq, i)) {
   3707 			txs = &txq->txq_soft[i];
   3708 			printf("txs %d tx %d -> %d\n",
   3709 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3710 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3711 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3712 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3713 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3714 					printf("\t %#08x%08x\n",
   3715 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3716 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3717 				} else {
   3718 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3719 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3720 					    txq->txq_descs[j].wtx_addr.wa_low);
   3721 					printf("\t %#04x%02x%02x%08x\n",
   3722 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3723 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3724 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3725 					    txq->txq_descs[j].wtx_cmdlen);
   3726 				}
   3727 				if (j == txs->txs_lastdesc)
   3728 					break;
   3729 			}
   3730 		}
   3731 #endif
   3732 	}
   3733 }
   3734 
   3735 /*
   3736  * wm_tick:
   3737  *
   3738  *	One second timer, used to check link status, sweep up
   3739  *	completed transmit jobs, etc.
   3740  */
   3741 static void
   3742 wm_tick(void *arg)
   3743 {
   3744 	struct wm_softc *sc = arg;
   3745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3746 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3747 	    cexterr;
   3748 
   3749 	mutex_enter(sc->sc_core_lock);
   3750 
   3751 	if (sc->sc_core_stopping) {
   3752 		mutex_exit(sc->sc_core_lock);
   3753 		return;
   3754 	}
   3755 
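         	/*
         	 * The statistics registers are clear-on-read, so each read
         	 * returns the count accumulated since the previous tick;
         	 * accumulate it into the corresponding event counter.
         	 */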
   3756 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3757 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3758 	mpc = CSR_READ(sc, WMREG_MPC);
   3759 	colc = CSR_READ(sc, WMREG_COLC);
   3760 	sec = CSR_READ(sc, WMREG_SEC);
   3761 	rlec = CSR_READ(sc, WMREG_RLEC);
   3762 
   3763 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3764 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3765 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3766 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3767 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3768 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3769 
   3770 	if (sc->sc_type >= WM_T_82542_2_1) {
   3771 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3772 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3773 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3774 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3775 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3776 	}
   3777 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3778 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3779 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3780 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3781 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3782 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3783 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3784 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3785 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3786 
    3787 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
    3788 	    CSR_READ(sc, WMREG_GORCL) +
    3789 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
    3790 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
    3791 	    CSR_READ(sc, WMREG_GOTCL) +
    3792 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3791 
   3792 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3793 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3794 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3795 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3797 
    3798 	WM_EVCNT_ADD(&sc->sc_ev_tor,
    3799 	    CSR_READ(sc, WMREG_TORL) +
    3800 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
    3801 	WM_EVCNT_ADD(&sc->sc_ev_tot,
    3802 	    CSR_READ(sc, WMREG_TOTL) +
    3803 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3802 
   3803 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3805 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3806 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3807 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3808 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3810 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3811 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3812 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3813 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3814 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3815 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3816 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3817 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3818 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3819 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3820 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3821 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3822 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3823 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3824 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3825 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3826 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3827 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3828 
   3829 	if (sc->sc_type >= WM_T_82543) {
   3830 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3831 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3832 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3833 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3834 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3835 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3836 
   3837 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3838 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3839 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3840 	} else
   3841 		algnerrc = rxerrc = cexterr = 0;
   3842 
   3843 	if (sc->sc_type >= WM_T_82540) {
   3844 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3845 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3846 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3847 	}
   3848 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3849 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3850 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3851 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3852 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3853 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3854 	}
   3855 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3856 	if_statadd_ref(nsr, if_collisions, colc);
   3857 	if_statadd_ref(nsr, if_ierrors,
   3858 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3859 	/*
    3860 	 * WMREG_RNBC is incremented when no receive buffer is available in
    3861 	 * host memory. It is not a count of dropped packets, because the
    3862 	 * controller can still receive a packet in that state as long as
    3863 	 * there is space in its internal FIFO.
    3864 	 *
    3865 	 * To track WMREG_RNBC itself, use a dedicated event counter
    3866 	 * instead of if_iqdrops.
   3867 	 */
   3868 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3869 	IF_STAT_PUTREF(ifp);
   3870 
   3871 	if (sc->sc_flags & WM_F_HAS_MII)
   3872 		mii_tick(&sc->sc_mii);
   3873 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3874 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3875 		wm_serdes_tick(sc);
   3876 	else
   3877 		wm_tbi_tick(sc);
   3878 
   3879 	mutex_exit(sc->sc_core_lock);
   3880 
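         	/*
         	 * Reschedule the one-second tick only when the watchdog found
         	 * no hung queue; otherwise wm_handle_reset_work() reinitializes
         	 * the interface instead.
         	 */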
   3881 	if (wm_watchdog(ifp))
   3882 		callout_schedule(&sc->sc_tick_ch, hz);
   3883 }
   3884 
   3885 static int
   3886 wm_ifflags_cb(struct ethercom *ec)
   3887 {
   3888 	struct ifnet *ifp = &ec->ec_if;
   3889 	struct wm_softc *sc = ifp->if_softc;
   3890 	u_short iffchange;
   3891 	int ecchange;
   3892 	bool needreset = false;
   3893 	int rc = 0;
   3894 
   3895 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3896 		device_xname(sc->sc_dev), __func__));
   3897 
   3898 	KASSERT(IFNET_LOCKED(ifp));
   3899 
   3900 	mutex_enter(sc->sc_core_lock);
   3901 
   3902 	/*
    3903 	 * Check for if_flags changes. Mainly this avoids a full reset
    3904 	 * (and the resulting link down) when opening bpf sets IFF_PROMISC.
   3905 	 */
   3906 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3907 	sc->sc_if_flags = ifp->if_flags;
   3908 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3909 		needreset = true;
   3910 		goto ec;
   3911 	}
   3912 
   3913 	/* iff related updates */
   3914 	if ((iffchange & IFF_PROMISC) != 0)
   3915 		wm_set_filter(sc);
   3916 
   3917 	wm_set_vlan(sc);
   3918 
   3919 ec:
   3920 	/* Check for ec_capenable. */
   3921 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3922 	sc->sc_ec_capenable = ec->ec_capenable;
   3923 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3924 		needreset = true;
   3925 		goto out;
   3926 	}
   3927 
   3928 	/* ec related updates */
   3929 	wm_set_eee(sc);
   3930 
   3931 out:
   3932 	if (needreset)
   3933 		rc = ENETRESET;
   3934 	mutex_exit(sc->sc_core_lock);
   3935 
   3936 	return rc;
   3937 }
   3938 
   3939 static bool
   3940 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3941 {
   3942 
   3943 	switch (sc->sc_phytype) {
   3944 	case WMPHY_82577: /* ihphy */
   3945 	case WMPHY_82578: /* atphy */
   3946 	case WMPHY_82579: /* ihphy */
   3947 	case WMPHY_I217: /* ihphy */
   3948 	case WMPHY_82580: /* ihphy */
   3949 	case WMPHY_I350: /* ihphy */
   3950 		return true;
   3951 	default:
   3952 		return false;
   3953 	}
   3954 }
   3955 
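         /*
          * Mark every Tx queue so that packets are discarded, rather than
          * left queued, while the link is down.
          */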
   3956 static void
   3957 wm_set_linkdown_discard(struct wm_softc *sc)
   3958 {
   3959 
   3960 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3961 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3962 
   3963 		mutex_enter(txq->txq_lock);
   3964 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3965 		mutex_exit(txq->txq_lock);
   3966 	}
   3967 }
   3968 
   3969 static void
   3970 wm_clear_linkdown_discard(struct wm_softc *sc)
   3971 {
   3972 
   3973 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3974 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3975 
   3976 		mutex_enter(txq->txq_lock);
   3977 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3978 		mutex_exit(txq->txq_lock);
   3979 	}
   3980 }
   3981 
   3982 /*
   3983  * wm_ioctl:		[ifnet interface function]
   3984  *
   3985  *	Handle control requests from the operator.
   3986  */
   3987 static int
   3988 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3989 {
   3990 	struct wm_softc *sc = ifp->if_softc;
   3991 	struct ifreq *ifr = (struct ifreq *)data;
   3992 	struct ifaddr *ifa = (struct ifaddr *)data;
   3993 	struct sockaddr_dl *sdl;
   3994 	int error;
   3995 
   3996 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3997 		device_xname(sc->sc_dev), __func__));
   3998 
   3999 	switch (cmd) {
   4000 	case SIOCADDMULTI:
   4001 	case SIOCDELMULTI:
   4002 		break;
   4003 	default:
   4004 		KASSERT(IFNET_LOCKED(ifp));
   4005 	}
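         	/*
         	 * SIOCADDMULTI/SIOCDELMULTI may arrive without the ifnet lock;
         	 * every other request must hold it, as asserted above.
         	 */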
   4006 
   4007 	switch (cmd) {
   4008 	case SIOCSIFMEDIA:
   4009 		mutex_enter(sc->sc_core_lock);
   4010 		/* Flow control requires full-duplex mode. */
   4011 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4012 		    (ifr->ifr_media & IFM_FDX) == 0)
   4013 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4014 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4015 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4016 				/* We can do both TXPAUSE and RXPAUSE. */
   4017 				ifr->ifr_media |=
   4018 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4019 			}
   4020 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4021 		}
   4022 		mutex_exit(sc->sc_core_lock);
   4023 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4024 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4025 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4026 				DPRINTF(sc, WM_DEBUG_LINK,
   4027 				    ("%s: %s: Set linkdown discard flag\n",
   4028 					device_xname(sc->sc_dev), __func__));
   4029 				wm_set_linkdown_discard(sc);
   4030 			}
   4031 		}
   4032 		break;
   4033 	case SIOCINITIFADDR:
   4034 		mutex_enter(sc->sc_core_lock);
   4035 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4036 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4037 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4038 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4039 			/* Unicast address is the first multicast entry */
   4040 			wm_set_filter(sc);
   4041 			error = 0;
   4042 			mutex_exit(sc->sc_core_lock);
   4043 			break;
   4044 		}
   4045 		mutex_exit(sc->sc_core_lock);
   4046 		/*FALLTHROUGH*/
   4047 	default:
   4048 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4049 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4050 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4051 				DPRINTF(sc, WM_DEBUG_LINK,
   4052 				    ("%s: %s: Set linkdown discard flag\n",
   4053 					device_xname(sc->sc_dev), __func__));
   4054 				wm_set_linkdown_discard(sc);
   4055 			}
   4056 		}
   4057 		const int s = splnet();
   4058 		/* It may call wm_start, so unlock here */
   4059 		error = ether_ioctl(ifp, cmd, data);
   4060 		splx(s);
   4061 		if (error != ENETRESET)
   4062 			break;
   4063 
   4064 		error = 0;
   4065 
   4066 		if (cmd == SIOCSIFCAP)
   4067 			error = if_init(ifp);
   4068 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4069 			mutex_enter(sc->sc_core_lock);
   4070 			if (sc->sc_if_flags & IFF_RUNNING) {
   4071 				/*
   4072 				 * Multicast list has changed; set the hardware filter
   4073 				 * accordingly.
   4074 				 */
   4075 				wm_set_filter(sc);
   4076 			}
   4077 			mutex_exit(sc->sc_core_lock);
   4078 		}
   4079 		break;
   4080 	}
   4081 
   4082 	return error;
   4083 }
   4084 
   4085 /* MAC address related */
   4086 
   4087 /*
    4088  * Get the NVM offset of the MAC address and return it.
    4089  * If an error occurs, offset 0 is used.
   4090  */
   4091 static uint16_t
   4092 wm_check_alt_mac_addr(struct wm_softc *sc)
   4093 {
   4094 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4095 	uint16_t offset = NVM_OFF_MACADDR;
   4096 
   4097 	/* Try to read alternative MAC address pointer */
   4098 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4099 		return 0;
   4100 
    4101 	/* Check whether the pointer is valid. */
   4102 	if ((offset == 0x0000) || (offset == 0xffff))
   4103 		return 0;
   4104 
   4105 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4106 	/*
    4107 	 * Check whether the alternative MAC address is valid. Some cards
    4108 	 * have a non-0xffff pointer but don't actually use an alternative
    4109 	 * MAC address.
    4110 	 *
    4111 	 * A valid unicast address must not have the broadcast bit set.
   4112 	 */
   4113 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4114 		if (((myea[0] & 0xff) & 0x01) == 0)
   4115 			return offset; /* Found */
   4116 
   4117 	/* Not found */
   4118 	return 0;
   4119 }
   4120 
   4121 static int
   4122 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4123 {
   4124 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4125 	uint16_t offset = NVM_OFF_MACADDR;
   4126 	int do_invert = 0;
   4127 
   4128 	switch (sc->sc_type) {
   4129 	case WM_T_82580:
   4130 	case WM_T_I350:
   4131 	case WM_T_I354:
   4132 		/* EEPROM Top Level Partitioning */
   4133 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4134 		break;
   4135 	case WM_T_82571:
   4136 	case WM_T_82575:
   4137 	case WM_T_82576:
   4138 	case WM_T_80003:
   4139 	case WM_T_I210:
   4140 	case WM_T_I211:
   4141 		offset = wm_check_alt_mac_addr(sc);
   4142 		if (offset == 0)
   4143 			if ((sc->sc_funcid & 0x01) == 1)
   4144 				do_invert = 1;
   4145 		break;
   4146 	default:
   4147 		if ((sc->sc_funcid & 0x01) == 1)
   4148 			do_invert = 1;
   4149 		break;
   4150 	}
   4151 
   4152 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4153 		goto bad;
   4154 
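         	/* The NVM stores the MAC address as three little-endian 16-bit words. */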
   4155 	enaddr[0] = myea[0] & 0xff;
   4156 	enaddr[1] = myea[0] >> 8;
   4157 	enaddr[2] = myea[1] & 0xff;
   4158 	enaddr[3] = myea[1] >> 8;
   4159 	enaddr[4] = myea[2] & 0xff;
   4160 	enaddr[5] = myea[2] >> 8;
   4161 
   4162 	/*
   4163 	 * Toggle the LSB of the MAC address on the second port
   4164 	 * of some dual port cards.
   4165 	 */
   4166 	if (do_invert != 0)
   4167 		enaddr[5] ^= 1;
   4168 
   4169 	return 0;
   4170 
   4171  bad:
   4172 	return -1;
   4173 }
   4174 
   4175 /*
   4176  * wm_set_ral:
   4177  *
    4178  *	Set an entry in the receive address list.
   4179  */
   4180 static void
   4181 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4182 {
   4183 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4184 	uint32_t wlock_mac;
   4185 	int rv;
   4186 
   4187 	if (enaddr != NULL) {
   4188 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4189 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4190 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4191 		ral_hi |= RAL_AV;
   4192 	} else {
   4193 		ral_lo = 0;
   4194 		ral_hi = 0;
   4195 	}
   4196 
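         	/*
         	 * Example (hypothetical address 00:1b:21:3a:4c:5d):
         	 * ral_lo = 0x3a211b00 and ral_hi = 0x5d4c | RAL_AV; the
         	 * address-valid bit tells the MAC to use this entry for
         	 * exact-match filtering.
         	 */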
   4197 	switch (sc->sc_type) {
   4198 	case WM_T_82542_2_0:
   4199 	case WM_T_82542_2_1:
   4200 	case WM_T_82543:
   4201 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4202 		CSR_WRITE_FLUSH(sc);
   4203 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4204 		CSR_WRITE_FLUSH(sc);
   4205 		break;
   4206 	case WM_T_PCH2:
   4207 	case WM_T_PCH_LPT:
   4208 	case WM_T_PCH_SPT:
   4209 	case WM_T_PCH_CNP:
   4210 		if (idx == 0) {
   4211 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4212 			CSR_WRITE_FLUSH(sc);
   4213 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4214 			CSR_WRITE_FLUSH(sc);
   4215 			return;
   4216 		}
   4217 		if (sc->sc_type != WM_T_PCH2) {
   4218 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4219 			    FWSM_WLOCK_MAC);
   4220 			addrl = WMREG_SHRAL(idx - 1);
   4221 			addrh = WMREG_SHRAH(idx - 1);
   4222 		} else {
   4223 			wlock_mac = 0;
   4224 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4225 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4226 		}
   4227 
   4228 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4229 			rv = wm_get_swflag_ich8lan(sc);
   4230 			if (rv != 0)
   4231 				return;
   4232 			CSR_WRITE(sc, addrl, ral_lo);
   4233 			CSR_WRITE_FLUSH(sc);
   4234 			CSR_WRITE(sc, addrh, ral_hi);
   4235 			CSR_WRITE_FLUSH(sc);
   4236 			wm_put_swflag_ich8lan(sc);
   4237 		}
   4238 
   4239 		break;
   4240 	default:
   4241 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4242 		CSR_WRITE_FLUSH(sc);
   4243 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4244 		CSR_WRITE_FLUSH(sc);
   4245 		break;
   4246 	}
   4247 }
   4248 
   4249 /*
   4250  * wm_mchash:
   4251  *
   4252  *	Compute the hash of the multicast address for the 4096-bit
   4253  *	multicast filter.
   4254  */
   4255 static uint32_t
   4256 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4257 {
   4258 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4259 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4260 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4261 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4262 	uint32_t hash;
   4263 
   4264 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4265 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4266 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4267 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4268 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4269 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4270 		return (hash & 0x3ff);
   4271 	}
   4272 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4273 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4274 
   4275 	return (hash & 0xfff);
   4276 }
   4277 
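         /*
          * Worked example (hypothetical, sc_mchash_type == 0 on a non-ICH
          * chip): enaddr[4] = 0x2c and enaddr[5] = 0x9d give
          * hash = (0x2c >> 4) | (0x9d << 4) = 0x9d2 (12 bits).
          * wm_set_filter() then uses hash[11:5] as the MTA register index
          * and hash[4:0] as the bit index within that register.
          */
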
    4278 /*
    4279  * wm_rar_count:
    4280  *	Return the number of receive address list entries.
    4281  */
   4282 static int
   4283 wm_rar_count(struct wm_softc *sc)
   4284 {
   4285 	int size;
   4286 
   4287 	switch (sc->sc_type) {
   4288 	case WM_T_ICH8:
    4289 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4290 		break;
   4291 	case WM_T_ICH9:
   4292 	case WM_T_ICH10:
   4293 	case WM_T_PCH:
   4294 		size = WM_RAL_TABSIZE_ICH8;
   4295 		break;
   4296 	case WM_T_PCH2:
   4297 		size = WM_RAL_TABSIZE_PCH2;
   4298 		break;
   4299 	case WM_T_PCH_LPT:
   4300 	case WM_T_PCH_SPT:
   4301 	case WM_T_PCH_CNP:
   4302 		size = WM_RAL_TABSIZE_PCH_LPT;
   4303 		break;
   4304 	case WM_T_82575:
   4305 	case WM_T_I210:
   4306 	case WM_T_I211:
   4307 		size = WM_RAL_TABSIZE_82575;
   4308 		break;
   4309 	case WM_T_82576:
   4310 	case WM_T_82580:
   4311 		size = WM_RAL_TABSIZE_82576;
   4312 		break;
   4313 	case WM_T_I350:
   4314 	case WM_T_I354:
   4315 		size = WM_RAL_TABSIZE_I350;
   4316 		break;
   4317 	default:
   4318 		size = WM_RAL_TABSIZE;
   4319 	}
   4320 
   4321 	return size;
   4322 }
   4323 
   4324 /*
   4325  * wm_set_filter:
   4326  *
   4327  *	Set up the receive filter.
   4328  */
   4329 static void
   4330 wm_set_filter(struct wm_softc *sc)
   4331 {
   4332 	struct ethercom *ec = &sc->sc_ethercom;
   4333 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4334 	struct ether_multi *enm;
   4335 	struct ether_multistep step;
   4336 	bus_addr_t mta_reg;
   4337 	uint32_t hash, reg, bit;
   4338 	int i, size, ralmax, rv;
   4339 
   4340 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4341 		device_xname(sc->sc_dev), __func__));
   4342 	KASSERT(mutex_owned(sc->sc_core_lock));
   4343 
   4344 	if (sc->sc_type >= WM_T_82544)
   4345 		mta_reg = WMREG_CORDOVA_MTA;
   4346 	else
   4347 		mta_reg = WMREG_MTA;
   4348 
   4349 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4350 
   4351 	if (sc->sc_if_flags & IFF_BROADCAST)
   4352 		sc->sc_rctl |= RCTL_BAM;
   4353 	if (sc->sc_if_flags & IFF_PROMISC) {
   4354 		sc->sc_rctl |= RCTL_UPE;
   4355 		ETHER_LOCK(ec);
   4356 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4357 		ETHER_UNLOCK(ec);
   4358 		goto allmulti;
   4359 	}
   4360 
   4361 	/*
   4362 	 * Set the station address in the first RAL slot, and
   4363 	 * clear the remaining slots.
   4364 	 */
   4365 	size = wm_rar_count(sc);
   4366 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4367 
   4368 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4369 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4370 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4371 		switch (i) {
   4372 		case 0:
   4373 			/* We can use all entries */
   4374 			ralmax = size;
   4375 			break;
   4376 		case 1:
   4377 			/* Only RAR[0] */
   4378 			ralmax = 1;
   4379 			break;
   4380 		default:
   4381 			/* Available SHRA + RAR[0] */
   4382 			ralmax = i + 1;
   4383 		}
   4384 	} else
   4385 		ralmax = size;
   4386 	for (i = 1; i < size; i++) {
   4387 		if (i < ralmax)
   4388 			wm_set_ral(sc, NULL, i);
   4389 	}
   4390 
   4391 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4392 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4393 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4394 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4395 		size = WM_ICH8_MC_TABSIZE;
   4396 	else
   4397 		size = WM_MC_TABSIZE;
   4398 	/* Clear out the multicast table. */
   4399 	for (i = 0; i < size; i++) {
   4400 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4401 		CSR_WRITE_FLUSH(sc);
   4402 	}
   4403 
   4404 	ETHER_LOCK(ec);
   4405 	ETHER_FIRST_MULTI(step, ec, enm);
   4406 	while (enm != NULL) {
   4407 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4408 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4409 			ETHER_UNLOCK(ec);
   4410 			/*
   4411 			 * We must listen to a range of multicast addresses.
   4412 			 * For now, just accept all multicasts, rather than
   4413 			 * trying to set only those filter bits needed to match
   4414 			 * the range.  (At this time, the only use of address
   4415 			 * ranges is for IP multicast routing, for which the
   4416 			 * range is big enough to require all bits set.)
   4417 			 */
   4418 			goto allmulti;
   4419 		}
   4420 
   4421 		hash = wm_mchash(sc, enm->enm_addrlo);
   4422 
   4423 		reg = (hash >> 5);
   4424 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4425 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4426 		    || (sc->sc_type == WM_T_PCH2)
   4427 		    || (sc->sc_type == WM_T_PCH_LPT)
   4428 		    || (sc->sc_type == WM_T_PCH_SPT)
   4429 		    || (sc->sc_type == WM_T_PCH_CNP))
   4430 			reg &= 0x1f;
   4431 		else
   4432 			reg &= 0x7f;
   4433 		bit = hash & 0x1f;
   4434 
   4435 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4436 		hash |= 1U << bit;
   4437 
   4438 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4439 			/*
    4440 			 * 82544 Errata 9: Certain registers cannot be written
   4441 			 * with particular alignments in PCI-X bus operation
   4442 			 * (FCAH, MTA and VFTA).
   4443 			 */
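         			/*
         			 * Read back the adjacent even-indexed MTA
         			 * register and re-write it after the odd-indexed
         			 * write so the pair is updated together.
         			 */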
   4444 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4445 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4446 			CSR_WRITE_FLUSH(sc);
   4447 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4448 			CSR_WRITE_FLUSH(sc);
   4449 		} else {
   4450 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4451 			CSR_WRITE_FLUSH(sc);
   4452 		}
   4453 
   4454 		ETHER_NEXT_MULTI(step, enm);
   4455 	}
   4456 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4457 	ETHER_UNLOCK(ec);
   4458 
   4459 	goto setit;
   4460 
   4461  allmulti:
   4462 	sc->sc_rctl |= RCTL_MPE;
   4463 
   4464  setit:
   4465 	if (sc->sc_type >= WM_T_PCH2) {
   4466 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4467 		    && (ifp->if_mtu > ETHERMTU))
   4468 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4469 		else
   4470 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4471 		if (rv != 0)
   4472 			device_printf(sc->sc_dev,
   4473 			    "Failed to do workaround for jumbo frame.\n");
   4474 	}
   4475 
   4476 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4477 }
   4478 
   4479 /* Reset and init related */
   4480 
   4481 static void
   4482 wm_set_vlan(struct wm_softc *sc)
   4483 {
   4484 
   4485 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4486 		device_xname(sc->sc_dev), __func__));
   4487 
   4488 	/* Deal with VLAN enables. */
   4489 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4490 		sc->sc_ctrl |= CTRL_VME;
   4491 	else
   4492 		sc->sc_ctrl &= ~CTRL_VME;
   4493 
   4494 	/* Write the control registers. */
   4495 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4496 }
   4497 
   4498 static void
   4499 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4500 {
   4501 	uint32_t gcr;
   4502 	pcireg_t ctrl2;
   4503 
   4504 	gcr = CSR_READ(sc, WMREG_GCR);
   4505 
   4506 	/* Only take action if timeout value is defaulted to 0 */
   4507 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4508 		goto out;
   4509 
   4510 	if ((gcr & GCR_CAP_VER2) == 0) {
   4511 		gcr |= GCR_CMPL_TMOUT_10MS;
   4512 		goto out;
   4513 	}
   4514 
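         	/*
         	 * Capability version 2 devices expose the completion timeout
         	 * in the PCIe Device Control 2 register, so program a 16ms
         	 * value through config space rather than the GCR fallback
         	 * above.
         	 */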
   4515 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4516 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4517 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4518 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4519 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4520 
   4521 out:
   4522 	/* Disable completion timeout resend */
   4523 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4524 
   4525 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4526 }
   4527 
   4528 void
   4529 wm_get_auto_rd_done(struct wm_softc *sc)
   4530 {
   4531 	int i;
   4532 
    4533 	/* Wait for eeprom to reload */
   4534 	switch (sc->sc_type) {
   4535 	case WM_T_82571:
   4536 	case WM_T_82572:
   4537 	case WM_T_82573:
   4538 	case WM_T_82574:
   4539 	case WM_T_82583:
   4540 	case WM_T_82575:
   4541 	case WM_T_82576:
   4542 	case WM_T_82580:
   4543 	case WM_T_I350:
   4544 	case WM_T_I354:
   4545 	case WM_T_I210:
   4546 	case WM_T_I211:
   4547 	case WM_T_80003:
   4548 	case WM_T_ICH8:
   4549 	case WM_T_ICH9:
   4550 		for (i = 0; i < 10; i++) {
   4551 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4552 				break;
   4553 			delay(1000);
   4554 		}
   4555 		if (i == 10) {
   4556 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4557 			    "complete\n", device_xname(sc->sc_dev));
   4558 		}
   4559 		break;
   4560 	default:
   4561 		break;
   4562 	}
   4563 }
   4564 
   4565 void
   4566 wm_lan_init_done(struct wm_softc *sc)
   4567 {
   4568 	uint32_t reg = 0;
   4569 	int i;
   4570 
   4571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4572 		device_xname(sc->sc_dev), __func__));
   4573 
   4574 	/* Wait for eeprom to reload */
   4575 	switch (sc->sc_type) {
   4576 	case WM_T_ICH10:
   4577 	case WM_T_PCH:
   4578 	case WM_T_PCH2:
   4579 	case WM_T_PCH_LPT:
   4580 	case WM_T_PCH_SPT:
   4581 	case WM_T_PCH_CNP:
   4582 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4583 			reg = CSR_READ(sc, WMREG_STATUS);
   4584 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4585 				break;
   4586 			delay(100);
   4587 		}
   4588 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4589 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4590 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4591 		}
   4592 		break;
   4593 	default:
   4594 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4595 		    __func__);
   4596 		break;
   4597 	}
   4598 
   4599 	reg &= ~STATUS_LAN_INIT_DONE;
   4600 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4601 }
   4602 
   4603 void
   4604 wm_get_cfg_done(struct wm_softc *sc)
   4605 {
   4606 	int mask;
   4607 	uint32_t reg;
   4608 	int i;
   4609 
   4610 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4611 		device_xname(sc->sc_dev), __func__));
   4612 
   4613 	/* Wait for eeprom to reload */
   4614 	switch (sc->sc_type) {
   4615 	case WM_T_82542_2_0:
   4616 	case WM_T_82542_2_1:
   4617 		/* null */
   4618 		break;
   4619 	case WM_T_82543:
   4620 	case WM_T_82544:
   4621 	case WM_T_82540:
   4622 	case WM_T_82545:
   4623 	case WM_T_82545_3:
   4624 	case WM_T_82546:
   4625 	case WM_T_82546_3:
   4626 	case WM_T_82541:
   4627 	case WM_T_82541_2:
   4628 	case WM_T_82547:
   4629 	case WM_T_82547_2:
   4630 	case WM_T_82573:
   4631 	case WM_T_82574:
   4632 	case WM_T_82583:
   4633 		/* generic */
   4634 		delay(10*1000);
   4635 		break;
   4636 	case WM_T_80003:
   4637 	case WM_T_82571:
   4638 	case WM_T_82572:
   4639 	case WM_T_82575:
   4640 	case WM_T_82576:
   4641 	case WM_T_82580:
   4642 	case WM_T_I350:
   4643 	case WM_T_I354:
   4644 	case WM_T_I210:
   4645 	case WM_T_I211:
   4646 		if (sc->sc_type == WM_T_82571) {
   4647 			/* Only 82571 shares port 0 */
   4648 			mask = EEMNGCTL_CFGDONE_0;
   4649 		} else
   4650 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
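         		/*
         		 * Each LAN function has its own CFGDONE bit; function 1,
         		 * for example, polls EEMNGCTL_CFGDONE_0 << 1.
         		 */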
   4651 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4652 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4653 				break;
   4654 			delay(1000);
   4655 		}
   4656 		if (i >= WM_PHY_CFG_TIMEOUT)
   4657 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4658 				device_xname(sc->sc_dev), __func__));
   4659 		break;
   4660 	case WM_T_ICH8:
   4661 	case WM_T_ICH9:
   4662 	case WM_T_ICH10:
   4663 	case WM_T_PCH:
   4664 	case WM_T_PCH2:
   4665 	case WM_T_PCH_LPT:
   4666 	case WM_T_PCH_SPT:
   4667 	case WM_T_PCH_CNP:
   4668 		delay(10*1000);
   4669 		if (sc->sc_type >= WM_T_ICH10)
   4670 			wm_lan_init_done(sc);
   4671 		else
   4672 			wm_get_auto_rd_done(sc);
   4673 
   4674 		/* Clear PHY Reset Asserted bit */
   4675 		reg = CSR_READ(sc, WMREG_STATUS);
   4676 		if ((reg & STATUS_PHYRA) != 0)
   4677 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4678 		break;
   4679 	default:
   4680 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4681 		    __func__);
   4682 		break;
   4683 	}
   4684 }
   4685 
   4686 int
   4687 wm_phy_post_reset(struct wm_softc *sc)
   4688 {
   4689 	device_t dev = sc->sc_dev;
   4690 	uint16_t reg;
   4691 	int rv = 0;
   4692 
   4693 	/* This function is only for ICH8 and newer. */
   4694 	if (sc->sc_type < WM_T_ICH8)
   4695 		return 0;
   4696 
   4697 	if (wm_phy_resetisblocked(sc)) {
   4698 		/* XXX */
   4699 		device_printf(dev, "PHY is blocked\n");
   4700 		return -1;
   4701 	}
   4702 
    4703 	/* Allow time for h/w to get to a quiescent state after reset */
   4704 	delay(10*1000);
   4705 
   4706 	/* Perform any necessary post-reset workarounds */
   4707 	if (sc->sc_type == WM_T_PCH)
   4708 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4709 	else if (sc->sc_type == WM_T_PCH2)
   4710 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4711 	if (rv != 0)
   4712 		return rv;
   4713 
   4714 	/* Clear the host wakeup bit after lcd reset */
   4715 	if (sc->sc_type >= WM_T_PCH) {
   4716 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4717 		reg &= ~BM_WUC_HOST_WU_BIT;
   4718 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4719 	}
   4720 
   4721 	/* Configure the LCD with the extended configuration region in NVM */
   4722 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4723 		return rv;
   4724 
   4725 	/* Configure the LCD with the OEM bits in NVM */
   4726 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4727 
   4728 	if (sc->sc_type == WM_T_PCH2) {
   4729 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4730 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4731 			delay(10 * 1000);
   4732 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4733 		}
   4734 		/* Set EEE LPI Update Timer to 200usec */
   4735 		rv = sc->phy.acquire(sc);
   4736 		if (rv)
   4737 			return rv;
   4738 		rv = wm_write_emi_reg_locked(dev,
   4739 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4740 		sc->phy.release(sc);
   4741 	}
   4742 
   4743 	return rv;
   4744 }
   4745 
   4746 /* Only for PCH and newer */
   4747 static int
   4748 wm_write_smbus_addr(struct wm_softc *sc)
   4749 {
   4750 	uint32_t strap, freq;
   4751 	uint16_t phy_data;
   4752 	int rv;
   4753 
   4754 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4755 		device_xname(sc->sc_dev), __func__));
   4756 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4757 
   4758 	strap = CSR_READ(sc, WMREG_STRAP);
   4759 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4760 
   4761 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4762 	if (rv != 0)
   4763 		return rv;
   4764 
   4765 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4766 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4767 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4768 
   4769 	if (sc->sc_phytype == WMPHY_I217) {
   4770 		/* Restore SMBus frequency */
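         		/*
         		 * The strap field seems to encode the frequency biased
         		 * by one, zero meaning "unsupported" (an assumption
         		 * based on the test-and-decrement below).
         		 */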
    4771 		if (freq--) {
   4772 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4773 			    | HV_SMB_ADDR_FREQ_HIGH);
   4774 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4775 			    HV_SMB_ADDR_FREQ_LOW);
   4776 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4777 			    HV_SMB_ADDR_FREQ_HIGH);
   4778 		} else
   4779 			DPRINTF(sc, WM_DEBUG_INIT,
   4780 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4781 				device_xname(sc->sc_dev), __func__));
   4782 	}
   4783 
   4784 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4785 	    phy_data);
   4786 }
   4787 
   4788 static int
   4789 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4790 {
   4791 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4792 	uint16_t phy_page = 0;
   4793 	int rv = 0;
   4794 
   4795 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4796 		device_xname(sc->sc_dev), __func__));
   4797 
   4798 	switch (sc->sc_type) {
   4799 	case WM_T_ICH8:
   4800 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4801 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4802 			return 0;
   4803 
   4804 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4805 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4806 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4807 			break;
   4808 		}
   4809 		/* FALLTHROUGH */
   4810 	case WM_T_PCH:
   4811 	case WM_T_PCH2:
   4812 	case WM_T_PCH_LPT:
   4813 	case WM_T_PCH_SPT:
   4814 	case WM_T_PCH_CNP:
   4815 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4816 		break;
   4817 	default:
   4818 		return 0;
   4819 	}
   4820 
   4821 	if ((rv = sc->phy.acquire(sc)) != 0)
   4822 		return rv;
   4823 
   4824 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4825 	if ((reg & sw_cfg_mask) == 0)
   4826 		goto release;
   4827 
   4828 	/*
   4829 	 * Make sure HW does not configure LCD from PHY extended configuration
   4830 	 * before SW configuration
   4831 	 */
   4832 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4833 	if ((sc->sc_type < WM_T_PCH2)
   4834 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4835 		goto release;
   4836 
   4837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4838 		device_xname(sc->sc_dev), __func__));
    4839 	/* The NVM pointer is in DWORDs, so convert it to a word address */
   4840 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4841 
   4842 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4843 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4844 	if (cnf_size == 0)
   4845 		goto release;
   4846 
   4847 	if (((sc->sc_type == WM_T_PCH)
   4848 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4849 	    || (sc->sc_type > WM_T_PCH)) {
   4850 		/*
   4851 		 * HW configures the SMBus address and LEDs when the OEM and
   4852 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4853 		 * are cleared, SW will configure them instead.
   4854 		 */
   4855 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4856 			device_xname(sc->sc_dev), __func__));
   4857 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4858 			goto release;
   4859 
   4860 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4861 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4862 		    (uint16_t)reg);
   4863 		if (rv != 0)
   4864 			goto release;
   4865 	}
   4866 
   4867 	/* Configure LCD from extended configuration region. */
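         	/*
         	 * The region is an array of (data, address) word pairs; a
         	 * write to IGPHY_PAGE_SELECT latches the page for the entries
         	 * that follow it.
         	 */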
   4868 	for (i = 0; i < cnf_size; i++) {
   4869 		uint16_t reg_data, reg_addr;
   4870 
   4871 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4872 			goto release;
   4873 
    4874 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4875 			goto release;
   4876 
   4877 		if (reg_addr == IGPHY_PAGE_SELECT)
   4878 			phy_page = reg_data;
   4879 
   4880 		reg_addr &= IGPHY_MAXREGADDR;
   4881 		reg_addr |= phy_page;
   4882 
   4883 		KASSERT(sc->phy.writereg_locked != NULL);
   4884 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4885 		    reg_data);
   4886 	}
   4887 
   4888 release:
   4889 	sc->phy.release(sc);
   4890 	return rv;
   4891 }
   4892 
   4893 /*
   4894  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4895  *  @sc:       pointer to the HW structure
   4896  *  @d0_state: boolean if entering d0 or d3 device state
   4897  *
   4898  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4899  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4900  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4901  */
   4902 int
   4903 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4904 {
   4905 	uint32_t mac_reg;
   4906 	uint16_t oem_reg;
   4907 	int rv;
   4908 
   4909 	if (sc->sc_type < WM_T_PCH)
   4910 		return 0;
   4911 
   4912 	rv = sc->phy.acquire(sc);
   4913 	if (rv != 0)
   4914 		return rv;
   4915 
   4916 	if (sc->sc_type == WM_T_PCH) {
   4917 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4918 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4919 			goto release;
   4920 	}
   4921 
   4922 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4923 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4924 		goto release;
   4925 
   4926 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4927 
   4928 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4929 	if (rv != 0)
   4930 		goto release;
   4931 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4932 
   4933 	if (d0_state) {
   4934 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4935 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4936 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4937 			oem_reg |= HV_OEM_BITS_LPLU;
   4938 	} else {
   4939 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4940 		    != 0)
   4941 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4942 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4943 		    != 0)
   4944 			oem_reg |= HV_OEM_BITS_LPLU;
   4945 	}
   4946 
   4947 	/* Set Restart auto-neg to activate the bits */
   4948 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4949 	    && (wm_phy_resetisblocked(sc) == false))
   4950 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4951 
   4952 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4953 
   4954 release:
   4955 	sc->phy.release(sc);
   4956 
   4957 	return rv;
   4958 }
   4959 
   4960 /* Init hardware bits */
   4961 void
   4962 wm_initialize_hardware_bits(struct wm_softc *sc)
   4963 {
   4964 	uint32_t tarc0, tarc1, reg;
   4965 
   4966 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4967 		device_xname(sc->sc_dev), __func__));
   4968 
   4969 	/* For 82571 variant, 80003 and ICHs */
   4970 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4971 	    || (sc->sc_type >= WM_T_80003)) {
   4972 
   4973 		/* Transmit Descriptor Control 0 */
   4974 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4975 		reg |= TXDCTL_COUNT_DESC;
   4976 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4977 
   4978 		/* Transmit Descriptor Control 1 */
   4979 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4980 		reg |= TXDCTL_COUNT_DESC;
   4981 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4982 
   4983 		/* TARC0 */
   4984 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4985 		switch (sc->sc_type) {
   4986 		case WM_T_82571:
   4987 		case WM_T_82572:
   4988 		case WM_T_82573:
   4989 		case WM_T_82574:
   4990 		case WM_T_82583:
   4991 		case WM_T_80003:
   4992 			/* Clear bits 30..27 */
   4993 			tarc0 &= ~__BITS(30, 27);
   4994 			break;
   4995 		default:
   4996 			break;
   4997 		}
   4998 
   4999 		switch (sc->sc_type) {
   5000 		case WM_T_82571:
   5001 		case WM_T_82572:
   5002 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5003 
   5004 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5005 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5006 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5007 			/* 8257[12] Errata No.7 */
    5008 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5009 
   5010 			/* TARC1 bit 28 */
   5011 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5012 				tarc1 &= ~__BIT(28);
   5013 			else
   5014 				tarc1 |= __BIT(28);
   5015 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5016 
   5017 			/*
   5018 			 * 8257[12] Errata No.13
    5019 			 * Disable Dynamic Clock Gating.
   5020 			 */
   5021 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5022 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5023 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5024 			break;
   5025 		case WM_T_82573:
   5026 		case WM_T_82574:
   5027 		case WM_T_82583:
   5028 			if ((sc->sc_type == WM_T_82574)
   5029 			    || (sc->sc_type == WM_T_82583))
   5030 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5031 
   5032 			/* Extended Device Control */
   5033 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5034 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5035 			reg |= __BIT(22);	/* Set bit 22 */
   5036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5037 
   5038 			/* Device Control */
   5039 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5040 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5041 
   5042 			/* PCIe Control Register */
   5043 			/*
   5044 			 * 82573 Errata (unknown).
   5045 			 *
   5046 			 * 82574 Errata 25 and 82583 Errata 12
   5047 			 * "Dropped Rx Packets":
    5048 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   5049 			 */
   5050 			reg = CSR_READ(sc, WMREG_GCR);
   5051 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5052 			CSR_WRITE(sc, WMREG_GCR, reg);
   5053 
   5054 			if ((sc->sc_type == WM_T_82574)
   5055 			    || (sc->sc_type == WM_T_82583)) {
   5056 				/*
   5057 				 * Document says this bit must be set for
   5058 				 * proper operation.
   5059 				 */
   5060 				reg = CSR_READ(sc, WMREG_GCR);
   5061 				reg |= __BIT(22);
   5062 				CSR_WRITE(sc, WMREG_GCR, reg);
   5063 
   5064 				/*
    5065 				 * Apply a workaround for the hardware
    5066 				 * erratum documented in the errata sheets:
    5067 				 * some error-prone or unreliable PCIe
    5068 				 * completions occur, particularly with
    5069 				 * ASPM enabled.  Without the fix, the
    5070 				 * issue can cause Tx timeouts.
   5071 				 */
   5072 				reg = CSR_READ(sc, WMREG_GCR2);
   5073 				reg |= __BIT(0);
   5074 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5075 			}
   5076 			break;
   5077 		case WM_T_80003:
   5078 			/* TARC0 */
   5079 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5080 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   5081 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   5082 
   5083 			/* TARC1 bit 28 */
   5084 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5085 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5086 				tarc1 &= ~__BIT(28);
   5087 			else
   5088 				tarc1 |= __BIT(28);
   5089 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5090 			break;
   5091 		case WM_T_ICH8:
   5092 		case WM_T_ICH9:
   5093 		case WM_T_ICH10:
   5094 		case WM_T_PCH:
   5095 		case WM_T_PCH2:
   5096 		case WM_T_PCH_LPT:
   5097 		case WM_T_PCH_SPT:
   5098 		case WM_T_PCH_CNP:
   5099 			/* TARC0 */
   5100 			if (sc->sc_type == WM_T_ICH8) {
   5101 				/* Set TARC0 bits 29 and 28 */
   5102 				tarc0 |= __BITS(29, 28);
   5103 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5104 				tarc0 |= __BIT(29);
   5105 				/*
    5106 				 * Drop bit 28, following Linux.  See the
    5107 				 * I218/I219 spec update, "5. Buffer Overrun
    5108 				 * While the I219 is Processing DMA
    5109 				 * Transactions".
   5110 				 */
   5111 				tarc0 &= ~__BIT(28);
   5112 			}
   5113 			/* Set TARC0 bits 23,24,26,27 */
   5114 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5115 
   5116 			/* CTRL_EXT */
   5117 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5118 			reg |= __BIT(22);	/* Set bit 22 */
   5119 			/*
   5120 			 * Enable PHY low-power state when MAC is at D3
   5121 			 * w/o WoL
   5122 			 */
   5123 			if (sc->sc_type >= WM_T_PCH)
   5124 				reg |= CTRL_EXT_PHYPDEN;
   5125 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5126 
   5127 			/* TARC1 */
   5128 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5129 			/* bit 28 */
   5130 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5131 				tarc1 &= ~__BIT(28);
   5132 			else
   5133 				tarc1 |= __BIT(28);
   5134 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5135 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5136 
   5137 			/* Device Status */
   5138 			if (sc->sc_type == WM_T_ICH8) {
   5139 				reg = CSR_READ(sc, WMREG_STATUS);
   5140 				reg &= ~__BIT(31);
   5141 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5142 
   5143 			}
   5144 
   5145 			/* IOSFPC */
   5146 			if (sc->sc_type == WM_T_PCH_SPT) {
   5147 				reg = CSR_READ(sc, WMREG_IOSFPC);
   5148 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   5149 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5150 			}
   5151 			/*
    5152 			 * To work around a descriptor data corruption
    5153 			 * issue with NFS v2 UDP traffic, simply disable
    5154 			 * the NFS filtering capability.
   5155 			 */
   5156 			reg = CSR_READ(sc, WMREG_RFCTL);
   5157 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5158 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5159 			break;
   5160 		default:
   5161 			break;
   5162 		}
   5163 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5164 
   5165 		switch (sc->sc_type) {
   5166 		/*
   5167 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
    5168 		 * Avoid the RSS Hash Value bug.
   5169 		 */
   5170 		case WM_T_82571:
   5171 		case WM_T_82572:
   5172 		case WM_T_82573:
   5173 		case WM_T_80003:
   5174 		case WM_T_ICH8:
   5175 			reg = CSR_READ(sc, WMREG_RFCTL);
    5176 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5177 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5178 			break;
   5179 		case WM_T_82574:
    5180 			/* Use the extended Rx descriptor. */
   5181 			reg = CSR_READ(sc, WMREG_RFCTL);
   5182 			reg |= WMREG_RFCTL_EXSTEN;
   5183 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5184 			break;
   5185 		default:
   5186 			break;
   5187 		}
   5188 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5189 		/*
   5190 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5191 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5192 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5193 		 * Correctly by the Device"
   5194 		 *
   5195 		 * I354(C2000) Errata AVR53:
   5196 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5197 		 * Hang"
   5198 		 */
   5199 		reg = CSR_READ(sc, WMREG_RFCTL);
   5200 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5201 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5202 	}
   5203 }
   5204 
   5205 static uint32_t
   5206 wm_rxpbs_adjust_82580(uint32_t val)
   5207 {
   5208 	uint32_t rv = 0;
   5209 
   5210 	if (val < __arraycount(wm_82580_rxpbs_table))
   5211 		rv = wm_82580_rxpbs_table[val];
   5212 
   5213 	return rv;
   5214 }
   5215 
   5216 /*
   5217  * wm_reset_phy:
   5218  *
   5219  *	generic PHY reset function.
   5220  *	Same as e1000_phy_hw_reset_generic()
   5221  */
   5222 static int
   5223 wm_reset_phy(struct wm_softc *sc)
   5224 {
   5225 	uint32_t reg;
   5226 	int rv;
   5227 
   5228 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5229 		device_xname(sc->sc_dev), __func__));
   5230 	if (wm_phy_resetisblocked(sc))
   5231 		return -1;
   5232 
   5233 	rv = sc->phy.acquire(sc);
   5234 	if (rv) {
   5235 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5236 		    __func__, rv);
   5237 		return rv;
   5238 	}
   5239 
   5240 	reg = CSR_READ(sc, WMREG_CTRL);
   5241 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5242 	CSR_WRITE_FLUSH(sc);
   5243 
   5244 	delay(sc->phy.reset_delay_us);
   5245 
   5246 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5247 	CSR_WRITE_FLUSH(sc);
   5248 
   5249 	delay(150);
   5250 
   5251 	sc->phy.release(sc);
   5252 
   5253 	wm_get_cfg_done(sc);
   5254 	wm_phy_post_reset(sc);
   5255 
   5256 	return 0;
   5257 }
   5258 
   5259 /*
   5260  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5261  *
    5262  * On the I219, the descriptor rings must be emptied before resetting
    5263  * the HW or changing the device state to D3 during runtime (runtime PM).
    5264  *
    5265  * Failure to do this will cause the HW to enter a unit hang state that
    5266  * can only be released by a PCI reset of the device.
   5267  *
   5268  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5269  */
   5270 static void
   5271 wm_flush_desc_rings(struct wm_softc *sc)
   5272 {
   5273 	pcireg_t preg;
   5274 	uint32_t reg;
   5275 	struct wm_txqueue *txq;
   5276 	wiseman_txdesc_t *txd;
   5277 	int nexttx;
   5278 	uint32_t rctl;
   5279 
   5280 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5281 
   5282 	/* First, disable MULR fix in FEXTNVM11 */
   5283 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5284 	reg |= FEXTNVM11_DIS_MULRFIX;
   5285 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5286 
   5287 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5288 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5289 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5290 		return;
   5291 
   5292 	/*
   5293 	 * Remove all descriptors from the tx_ring.
   5294 	 *
    5295 	 * We want to clear all pending descriptors from the TX ring.
    5296 	 * Zeroing happens when the HW reads the regs.  We assign the ring
    5297 	 * itself as the data of the next descriptor.  We don't care about
    5298 	 * the data; we are about to reset the HW.
   5299 	 */
   5300 #ifdef WM_DEBUG
   5301 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5302 #endif
   5303 	reg = CSR_READ(sc, WMREG_TCTL);
   5304 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5305 
   5306 	txq = &sc->sc_queue[0].wmq_txq;
   5307 	nexttx = txq->txq_next;
   5308 	txd = &txq->txq_descs[nexttx];
   5309 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5310 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5311 	txd->wtx_fields.wtxu_status = 0;
   5312 	txd->wtx_fields.wtxu_options = 0;
   5313 	txd->wtx_fields.wtxu_vlan = 0;
   5314 
   5315 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5316 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5317 
   5318 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5319 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5320 	CSR_WRITE_FLUSH(sc);
   5321 	delay(250);
   5322 
   5323 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5324 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5325 		return;
   5326 
   5327 	/*
   5328 	 * Mark all descriptors in the RX ring as consumed and disable the
   5329 	 * rx ring.
   5330 	 */
   5331 #ifdef WM_DEBUG
   5332 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5333 #endif
   5334 	rctl = CSR_READ(sc, WMREG_RCTL);
   5335 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5336 	CSR_WRITE_FLUSH(sc);
   5337 	delay(150);
   5338 
   5339 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5340 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5341 	reg &= 0xffffc000;
   5342 	/*
   5343 	 * Update thresholds: prefetch threshold to 31, host threshold
   5344 	 * to 1 and make sure the granularity is "descriptors" and not
   5345 	 * "cache lines"
   5346 	 */
   5347 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5348 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5349 
   5350 	/* Momentarily enable the RX ring for the changes to take effect */
   5351 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5352 	CSR_WRITE_FLUSH(sc);
   5353 	delay(150);
   5354 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5355 }
   5356 
   5357 /*
   5358  * wm_reset:
   5359  *
   5360  *	Reset the i82542 chip.
   5361  */
   5362 static void
   5363 wm_reset(struct wm_softc *sc)
   5364 {
   5365 	int phy_reset = 0;
   5366 	int i, error = 0;
   5367 	uint32_t reg;
   5368 	uint16_t kmreg;
   5369 	int rv;
   5370 
   5371 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5372 		device_xname(sc->sc_dev), __func__));
   5373 	KASSERT(sc->sc_type != 0);
   5374 
   5375 	/*
   5376 	 * Allocate on-chip memory according to the MTU size.
   5377 	 * The Packet Buffer Allocation register must be written
   5378 	 * before the chip is reset.
   5379 	 */
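         	/*
         	 * For example, on an 82547 with an MTU above 8192 the packet
         	 * buffer is split 22K Rx / 18K Tx: the Tx FIFO bookkeeping
         	 * below derives its size from PBA_40K - sc_pba (assuming the
         	 * PBA_* constants are in kilobyte units).
         	 */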
   5380 	switch (sc->sc_type) {
   5381 	case WM_T_82547:
   5382 	case WM_T_82547_2:
   5383 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5384 		    PBA_22K : PBA_30K;
   5385 		for (i = 0; i < sc->sc_nqueues; i++) {
   5386 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5387 			txq->txq_fifo_head = 0;
   5388 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5389 			txq->txq_fifo_size =
   5390 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5391 			txq->txq_fifo_stall = 0;
   5392 		}
   5393 		break;
   5394 	case WM_T_82571:
   5395 	case WM_T_82572:
    5396 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5397 	case WM_T_80003:
   5398 		sc->sc_pba = PBA_32K;
   5399 		break;
   5400 	case WM_T_82573:
   5401 		sc->sc_pba = PBA_12K;
   5402 		break;
   5403 	case WM_T_82574:
   5404 	case WM_T_82583:
   5405 		sc->sc_pba = PBA_20K;
   5406 		break;
   5407 	case WM_T_82576:
   5408 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5409 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5410 		break;
   5411 	case WM_T_82580:
   5412 	case WM_T_I350:
   5413 	case WM_T_I354:
   5414 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5415 		break;
   5416 	case WM_T_I210:
   5417 	case WM_T_I211:
   5418 		sc->sc_pba = PBA_34K;
   5419 		break;
   5420 	case WM_T_ICH8:
   5421 		/* Workaround for a bit corruption issue in FIFO memory */
   5422 		sc->sc_pba = PBA_8K;
   5423 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5424 		break;
   5425 	case WM_T_ICH9:
   5426 	case WM_T_ICH10:
   5427 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5428 		    PBA_14K : PBA_10K;
   5429 		break;
   5430 	case WM_T_PCH:
   5431 	case WM_T_PCH2:	/* XXX 14K? */
   5432 	case WM_T_PCH_LPT:
   5433 	case WM_T_PCH_SPT:
   5434 	case WM_T_PCH_CNP:
   5435 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5436 		    PBA_12K : PBA_26K;
   5437 		break;
   5438 	default:
   5439 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5440 		    PBA_40K : PBA_48K;
   5441 		break;
   5442 	}
   5443 	/*
    5444 	 * Only old or non-multiqueue devices have the PBA register.
   5445 	 * XXX Need special handling for 82575.
   5446 	 */
   5447 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5448 	    || (sc->sc_type == WM_T_82575))
   5449 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5450 
   5451 	/* Prevent the PCI-E bus from sticking */
   5452 	if (sc->sc_flags & WM_F_PCIE) {
   5453 		int timeout = 800;
   5454 
   5455 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5456 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5457 
   5458 		while (timeout--) {
   5459 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5460 			    == 0)
   5461 				break;
   5462 			delay(100);
   5463 		}
   5464 		if (timeout == 0)
   5465 			device_printf(sc->sc_dev,
   5466 			    "failed to disable bus mastering\n");
   5467 	}
   5468 
   5469 	/* Set the completion timeout for interface */
   5470 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5471 	    || (sc->sc_type == WM_T_82580)
   5472 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5473 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5474 		wm_set_pcie_completion_timeout(sc);
   5475 
   5476 	/* Clear interrupt */
   5477 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5478 	if (wm_is_using_msix(sc)) {
   5479 		if (sc->sc_type != WM_T_82574) {
   5480 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5481 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5482 		} else
   5483 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5484 	}
   5485 
   5486 	/* Stop the transmit and receive processes. */
   5487 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5488 	sc->sc_rctl &= ~RCTL_EN;
   5489 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5490 	CSR_WRITE_FLUSH(sc);
   5491 
   5492 	/* XXX set_tbi_sbp_82543() */
   5493 
   5494 	delay(10*1000);
   5495 
   5496 	/* Must acquire the MDIO ownership before MAC reset */
   5497 	switch (sc->sc_type) {
   5498 	case WM_T_82573:
   5499 	case WM_T_82574:
   5500 	case WM_T_82583:
   5501 		error = wm_get_hw_semaphore_82573(sc);
   5502 		break;
   5503 	default:
   5504 		break;
   5505 	}
   5506 
   5507 	/*
   5508 	 * 82541 Errata 29? & 82547 Errata 28?
   5509 	 * See also the description about PHY_RST bit in CTRL register
   5510 	 * in 8254x_GBe_SDM.pdf.
   5511 	 */
   5512 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5513 		CSR_WRITE(sc, WMREG_CTRL,
   5514 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5515 		CSR_WRITE_FLUSH(sc);
   5516 		delay(5000);
   5517 	}
   5518 
   5519 	switch (sc->sc_type) {
   5520 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5521 	case WM_T_82541:
   5522 	case WM_T_82541_2:
   5523 	case WM_T_82547:
   5524 	case WM_T_82547_2:
   5525 		/*
   5526 		 * On some chipsets, a reset through a memory-mapped write
   5527 		 * cycle can cause the chip to reset before completing the
    5528 		 * write cycle. This causes a major headache that can be avoided
   5529 		 * by issuing the reset via indirect register writes through
   5530 		 * I/O space.
   5531 		 *
   5532 		 * So, if we successfully mapped the I/O BAR at attach time,
   5533 		 * use that. Otherwise, try our luck with a memory-mapped
   5534 		 * reset.
   5535 		 */
   5536 		if (sc->sc_flags & WM_F_IOH_VALID)
   5537 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5538 		else
   5539 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5540 		break;
   5541 	case WM_T_82545_3:
   5542 	case WM_T_82546_3:
   5543 		/* Use the shadow control register on these chips. */
   5544 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5545 		break;
   5546 	case WM_T_80003:
   5547 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5548 		if (sc->phy.acquire(sc) != 0)
   5549 			break;
   5550 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5551 		sc->phy.release(sc);
   5552 		break;
   5553 	case WM_T_ICH8:
   5554 	case WM_T_ICH9:
   5555 	case WM_T_ICH10:
   5556 	case WM_T_PCH:
   5557 	case WM_T_PCH2:
   5558 	case WM_T_PCH_LPT:
   5559 	case WM_T_PCH_SPT:
   5560 	case WM_T_PCH_CNP:
   5561 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5562 		if (wm_phy_resetisblocked(sc) == false) {
   5563 			/*
   5564 			 * Gate automatic PHY configuration by hardware on
   5565 			 * non-managed 82579
   5566 			 */
   5567 			if ((sc->sc_type == WM_T_PCH2)
   5568 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5569 				== 0))
   5570 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5571 
   5572 			reg |= CTRL_PHY_RESET;
   5573 			phy_reset = 1;
   5574 		} else
   5575 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5576 		if (sc->phy.acquire(sc) != 0)
   5577 			break;
   5578 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5579 		/* Don't insert a completion barrier during reset */
   5580 		delay(20*1000);
   5581 		mutex_exit(sc->sc_ich_phymtx);
   5582 		break;
   5583 	case WM_T_82580:
   5584 	case WM_T_I350:
   5585 	case WM_T_I354:
   5586 	case WM_T_I210:
   5587 	case WM_T_I211:
   5588 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5589 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5590 			CSR_WRITE_FLUSH(sc);
   5591 		delay(5000);
   5592 		break;
   5593 	case WM_T_82542_2_0:
   5594 	case WM_T_82542_2_1:
   5595 	case WM_T_82543:
   5596 	case WM_T_82540:
   5597 	case WM_T_82545:
   5598 	case WM_T_82546:
   5599 	case WM_T_82571:
   5600 	case WM_T_82572:
   5601 	case WM_T_82573:
   5602 	case WM_T_82574:
   5603 	case WM_T_82575:
   5604 	case WM_T_82576:
   5605 	case WM_T_82583:
   5606 	default:
   5607 		/* Everything else can safely use the documented method. */
   5608 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5609 		break;
   5610 	}
   5611 
   5612 	/* Must release the MDIO ownership after MAC reset */
   5613 	switch (sc->sc_type) {
   5614 	case WM_T_82573:
   5615 	case WM_T_82574:
   5616 	case WM_T_82583:
   5617 		if (error == 0)
   5618 			wm_put_hw_semaphore_82573(sc);
   5619 		break;
   5620 	default:
   5621 		break;
   5622 	}
   5623 
   5624 	/* Set Phy Config Counter to 50msec */
   5625 	if (sc->sc_type == WM_T_PCH2) {
   5626 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5627 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5628 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5629 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5630 	}
   5631 
   5632 	if (phy_reset != 0)
   5633 		wm_get_cfg_done(sc);
   5634 
   5635 	/* Reload EEPROM */
   5636 	switch (sc->sc_type) {
   5637 	case WM_T_82542_2_0:
   5638 	case WM_T_82542_2_1:
   5639 	case WM_T_82543:
   5640 	case WM_T_82544:
   5641 		delay(10);
   5642 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5643 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5644 		CSR_WRITE_FLUSH(sc);
   5645 		delay(2000);
   5646 		break;
   5647 	case WM_T_82540:
   5648 	case WM_T_82545:
   5649 	case WM_T_82545_3:
   5650 	case WM_T_82546:
   5651 	case WM_T_82546_3:
   5652 		delay(5*1000);
   5653 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5654 		break;
   5655 	case WM_T_82541:
   5656 	case WM_T_82541_2:
   5657 	case WM_T_82547:
   5658 	case WM_T_82547_2:
   5659 		delay(20000);
   5660 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5661 		break;
   5662 	case WM_T_82571:
   5663 	case WM_T_82572:
   5664 	case WM_T_82573:
   5665 	case WM_T_82574:
   5666 	case WM_T_82583:
   5667 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5668 			delay(10);
   5669 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5670 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5671 			CSR_WRITE_FLUSH(sc);
   5672 		}
   5673 		/* check EECD_EE_AUTORD */
   5674 		wm_get_auto_rd_done(sc);
   5675 		/*
    5676 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
   5677 		 * is set.
   5678 		 */
   5679 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5680 		    || (sc->sc_type == WM_T_82583))
   5681 			delay(25*1000);
   5682 		break;
   5683 	case WM_T_82575:
   5684 	case WM_T_82576:
   5685 	case WM_T_82580:
   5686 	case WM_T_I350:
   5687 	case WM_T_I354:
   5688 	case WM_T_I210:
   5689 	case WM_T_I211:
   5690 	case WM_T_80003:
   5691 		/* check EECD_EE_AUTORD */
   5692 		wm_get_auto_rd_done(sc);
   5693 		break;
   5694 	case WM_T_ICH8:
   5695 	case WM_T_ICH9:
   5696 	case WM_T_ICH10:
   5697 	case WM_T_PCH:
   5698 	case WM_T_PCH2:
   5699 	case WM_T_PCH_LPT:
   5700 	case WM_T_PCH_SPT:
   5701 	case WM_T_PCH_CNP:
   5702 		break;
   5703 	default:
   5704 		panic("%s: unknown type\n", __func__);
   5705 	}
   5706 
   5707 	/* Check whether EEPROM is present or not */
   5708 	switch (sc->sc_type) {
   5709 	case WM_T_82575:
   5710 	case WM_T_82576:
   5711 	case WM_T_82580:
   5712 	case WM_T_I350:
   5713 	case WM_T_I354:
   5714 	case WM_T_ICH8:
   5715 	case WM_T_ICH9:
   5716 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5717 			/* Not found */
   5718 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5719 			if (sc->sc_type == WM_T_82575)
   5720 				wm_reset_init_script_82575(sc);
   5721 		}
   5722 		break;
   5723 	default:
   5724 		break;
   5725 	}
   5726 
   5727 	if (phy_reset != 0)
   5728 		wm_phy_post_reset(sc);
   5729 
   5730 	if ((sc->sc_type == WM_T_82580)
   5731 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5732 		/* Clear global device reset status bit */
   5733 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5734 	}
   5735 
   5736 	/* Clear any pending interrupt events. */
   5737 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5738 	reg = CSR_READ(sc, WMREG_ICR);
   5739 	if (wm_is_using_msix(sc)) {
   5740 		if (sc->sc_type != WM_T_82574) {
   5741 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5742 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5743 		} else
   5744 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5745 	}
   5746 
   5747 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5748 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5749 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5750 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5751 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5752 		reg |= KABGTXD_BGSQLBIAS;
   5753 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5754 	}
   5755 
   5756 	/* Reload sc_ctrl */
   5757 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5758 
   5759 	wm_set_eee(sc);
   5760 
   5761 	/*
   5762 	 * For PCH, this write will make sure that any noise will be detected
   5763 	 * as a CRC error and be dropped rather than show up as a bad packet
   5764 	 * to the DMA engine
   5765 	 */
   5766 	if (sc->sc_type == WM_T_PCH)
   5767 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5768 
   5769 	if (sc->sc_type >= WM_T_82544)
   5770 		CSR_WRITE(sc, WMREG_WUC, 0);
   5771 
   5772 	if (sc->sc_type < WM_T_82575)
   5773 		wm_disable_aspm(sc); /* Workaround for some chips */
   5774 
   5775 	wm_reset_mdicnfg_82580(sc);
   5776 
   5777 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5778 		wm_pll_workaround_i210(sc);
   5779 
   5780 	if (sc->sc_type == WM_T_80003) {
   5781 		/* Default to TRUE to enable the MDIC W/A */
   5782 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5783 
   5784 		rv = wm_kmrn_readreg(sc,
   5785 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5786 		if (rv == 0) {
   5787 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5788 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5789 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5790 			else
   5791 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5792 		}
   5793 	}
   5794 }
   5795 
   5796 /*
   5797  * wm_add_rxbuf:
   5798  *
    5799  *	Add a receive buffer to the indicated descriptor.
   5800  */
   5801 static int
   5802 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5803 {
   5804 	struct wm_softc *sc = rxq->rxq_sc;
   5805 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5806 	struct mbuf *m;
   5807 	int error;
   5808 
   5809 	KASSERT(mutex_owned(rxq->rxq_lock));
   5810 
   5811 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5812 	if (m == NULL)
   5813 		return ENOBUFS;
   5814 
   5815 	MCLGET(m, M_DONTWAIT);
   5816 	if ((m->m_flags & M_EXT) == 0) {
   5817 		m_freem(m);
   5818 		return ENOBUFS;
   5819 	}
   5820 
   5821 	if (rxs->rxs_mbuf != NULL)
   5822 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5823 
   5824 	rxs->rxs_mbuf = m;
   5825 
   5826 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5827 	/*
   5828 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5829 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5830 	 */
   5831 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5832 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5833 	if (error) {
   5834 		/* XXX XXX XXX */
   5835 		aprint_error_dev(sc->sc_dev,
   5836 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5837 		panic("wm_add_rxbuf");
   5838 	}
   5839 
   5840 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5841 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5842 
   5843 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5844 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5845 			wm_init_rxdesc(rxq, idx);
   5846 	} else
   5847 		wm_init_rxdesc(rxq, idx);
   5848 
   5849 	return 0;
   5850 }
   5851 
   5852 /*
   5853  * wm_rxdrain:
   5854  *
   5855  *	Drain the receive queue.
   5856  */
   5857 static void
   5858 wm_rxdrain(struct wm_rxqueue *rxq)
   5859 {
   5860 	struct wm_softc *sc = rxq->rxq_sc;
   5861 	struct wm_rxsoft *rxs;
   5862 	int i;
   5863 
   5864 	KASSERT(mutex_owned(rxq->rxq_lock));
   5865 
   5866 	for (i = 0; i < WM_NRXDESC; i++) {
   5867 		rxs = &rxq->rxq_soft[i];
   5868 		if (rxs->rxs_mbuf != NULL) {
   5869 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5870 			m_freem(rxs->rxs_mbuf);
   5871 			rxs->rxs_mbuf = NULL;
   5872 		}
   5873 	}
   5874 }
   5875 
   5876 /*
    5877  * Set up registers for RSS.
    5878  *
    5879  * XXX VMDq is not yet supported.
   5880  */
   5881 static void
   5882 wm_init_rss(struct wm_softc *sc)
   5883 {
   5884 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5885 	int i;
   5886 
   5887 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5888 
   5889 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5890 		unsigned int qid, reta_ent;
   5891 
   5892 		qid  = i % sc->sc_nqueues;
   5893 		switch (sc->sc_type) {
   5894 		case WM_T_82574:
   5895 			reta_ent = __SHIFTIN(qid,
   5896 			    RETA_ENT_QINDEX_MASK_82574);
   5897 			break;
   5898 		case WM_T_82575:
   5899 			reta_ent = __SHIFTIN(qid,
   5900 			    RETA_ENT_QINDEX1_MASK_82575);
   5901 			break;
   5902 		default:
   5903 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5904 			break;
   5905 		}
   5906 
   5907 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5908 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5909 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5910 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5911 	}
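
         	/*
         	 * Example (hypothetical): with sc_nqueues == 4 the redirection
         	 * table above cycles 0,1,2,3,0,1,..., so the low-order bits of
         	 * the RSS hash spread flows evenly across the four Rx queues.
         	 */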
   5912 
   5913 	rss_getkey((uint8_t *)rss_key);
   5914 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5915 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5916 
   5917 	if (sc->sc_type == WM_T_82574)
   5918 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5919 	else
   5920 		mrqc = MRQC_ENABLE_RSS_MQ;
   5921 
   5922 	/*
   5923 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5924 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5925 	 */
   5926 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5927 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5928 #if 0
   5929 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5930 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5931 #endif
   5932 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5933 
   5934 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5935 }
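
/*
 * Example of the RETA setup above (a sketch, assuming sc_nqueues == 4):
 * redirection table entry i is programmed with queue (i % 4), so the
 * entries cycle 0,1,2,3,0,1,... and RSS hash results are spread evenly
 * across the four RX queues.
 */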
   5936 
/*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
 *     - The number of MSI-X vectors (= "nvectors" argument)
 *     - ncpu
 */
   5945 static void
   5946 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5947 {
   5948 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5949 
   5950 	if (nvectors < 2) {
   5951 		sc->sc_nqueues = 1;
   5952 		return;
   5953 	}
   5954 
   5955 	switch (sc->sc_type) {
   5956 	case WM_T_82572:
   5957 		hw_ntxqueues = 2;
   5958 		hw_nrxqueues = 2;
   5959 		break;
   5960 	case WM_T_82574:
   5961 		hw_ntxqueues = 2;
   5962 		hw_nrxqueues = 2;
   5963 		break;
   5964 	case WM_T_82575:
   5965 		hw_ntxqueues = 4;
   5966 		hw_nrxqueues = 4;
   5967 		break;
   5968 	case WM_T_82576:
   5969 		hw_ntxqueues = 16;
   5970 		hw_nrxqueues = 16;
   5971 		break;
   5972 	case WM_T_82580:
   5973 	case WM_T_I350:
   5974 	case WM_T_I354:
   5975 		hw_ntxqueues = 8;
   5976 		hw_nrxqueues = 8;
   5977 		break;
   5978 	case WM_T_I210:
   5979 		hw_ntxqueues = 4;
   5980 		hw_nrxqueues = 4;
   5981 		break;
   5982 	case WM_T_I211:
   5983 		hw_ntxqueues = 2;
   5984 		hw_nrxqueues = 2;
   5985 		break;
   5986 		/*
   5987 		 * The below Ethernet controllers do not support MSI-X;
   5988 		 * this driver doesn't let them use multiqueue.
   5989 		 *     - WM_T_80003
   5990 		 *     - WM_T_ICH8
   5991 		 *     - WM_T_ICH9
   5992 		 *     - WM_T_ICH10
   5993 		 *     - WM_T_PCH
   5994 		 *     - WM_T_PCH2
   5995 		 *     - WM_T_PCH_LPT
   5996 		 */
   5997 	default:
   5998 		hw_ntxqueues = 1;
   5999 		hw_nrxqueues = 1;
   6000 		break;
   6001 	}
   6002 
   6003 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6004 
	/*
	 * Using more queues than MSI-X vectors cannot improve scaling, so
	 * limit the number of queues actually used.
	 */
   6009 	if (nvectors < hw_nqueues + 1)
   6010 		sc->sc_nqueues = nvectors - 1;
   6011 	else
   6012 		sc->sc_nqueues = hw_nqueues;
   6013 
	/*
	 * Using more queues than CPUs cannot improve scaling, so limit
	 * the number of queues actually used.
	 */
   6018 	if (ncpu < sc->sc_nqueues)
   6019 		sc->sc_nqueues = ncpu;
   6020 }
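
/*
 * Worked example for wm_adjust_qnum() (illustrative): on an 82576
 * (16 hardware TX/RX queues) with nvectors == 5 and ncpu == 8, one
 * vector is reserved for the link interrupt, so sc_nqueues becomes
 * nvectors - 1 == 4; the ncpu cap does not apply here.  With
 * nvectors < 2 the driver always falls back to a single queue.
 */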
   6021 
   6022 static inline bool
   6023 wm_is_using_msix(struct wm_softc *sc)
   6024 {
   6025 
   6026 	return (sc->sc_nintrs > 1);
   6027 }
   6028 
   6029 static inline bool
   6030 wm_is_using_multiqueue(struct wm_softc *sc)
   6031 {
   6032 
   6033 	return (sc->sc_nqueues > 1);
   6034 }
   6035 
   6036 static int
   6037 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6038 {
   6039 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6040 
   6041 	wmq->wmq_id = qidx;
   6042 	wmq->wmq_intr_idx = intr_idx;
   6043 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6044 	    wm_handle_queue, wmq);
   6045 	if (wmq->wmq_si != NULL)
   6046 		return 0;
   6047 
   6048 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6049 	    wmq->wmq_id);
   6050 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6051 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6052 	return ENOMEM;
   6053 }
   6054 
   6055 /*
 * Both single-interrupt MSI and INTx can use this function.
   6057  */
   6058 static int
   6059 wm_setup_legacy(struct wm_softc *sc)
   6060 {
   6061 	pci_chipset_tag_t pc = sc->sc_pc;
   6062 	const char *intrstr = NULL;
   6063 	char intrbuf[PCI_INTRSTR_LEN];
   6064 	int error;
   6065 
   6066 	error = wm_alloc_txrx_queues(sc);
   6067 	if (error) {
   6068 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6069 		    error);
   6070 		return ENOMEM;
   6071 	}
   6072 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6073 	    sizeof(intrbuf));
   6074 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6075 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6076 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6077 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6079 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6080 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6081 		return ENOMEM;
   6082 	}
   6083 
   6084 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6085 	sc->sc_nintrs = 1;
   6086 
   6087 	return wm_softint_establish_queue(sc, 0, 0);
   6088 }
   6089 
   6090 static int
   6091 wm_setup_msix(struct wm_softc *sc)
   6092 {
   6093 	void *vih;
   6094 	kcpuset_t *affinity;
   6095 	int qidx, error, intr_idx, txrx_established;
   6096 	pci_chipset_tag_t pc = sc->sc_pc;
   6097 	const char *intrstr = NULL;
   6098 	char intrbuf[PCI_INTRSTR_LEN];
   6099 	char intr_xname[INTRDEVNAMEBUF];
   6100 
   6101 	if (sc->sc_nqueues < ncpu) {
		/*
		 * To avoid other devices' interrupts, start the affinity
		 * of TX/RX interrupts at CPU#1.
		 */
   6106 		sc->sc_affinity_offset = 1;
   6107 	} else {
		/*
		 * In this case, this device uses all CPUs, so make the
		 * affinitized cpu_index equal to the MSI-X vector number
		 * for readability.
		 */
   6112 		sc->sc_affinity_offset = 0;
   6113 	}
   6114 
   6115 	error = wm_alloc_txrx_queues(sc);
   6116 	if (error) {
   6117 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6118 		    error);
   6119 		return ENOMEM;
   6120 	}
   6121 
   6122 	kcpuset_create(&affinity, false);
   6123 	intr_idx = 0;
   6124 
   6125 	/*
   6126 	 * TX and RX
   6127 	 */
   6128 	txrx_established = 0;
   6129 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6130 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6131 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6132 
   6133 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6134 		    sizeof(intrbuf));
   6135 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6136 		    PCI_INTR_MPSAFE, true);
   6137 		memset(intr_xname, 0, sizeof(intr_xname));
   6138 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6139 		    device_xname(sc->sc_dev), qidx);
   6140 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6141 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6142 		if (vih == NULL) {
   6143 			aprint_error_dev(sc->sc_dev,
   6144 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6145 			    intrstr ? " at " : "",
   6146 			    intrstr ? intrstr : "");
   6147 
   6148 			goto fail;
   6149 		}
   6150 		kcpuset_zero(affinity);
   6151 		/* Round-robin affinity */
   6152 		kcpuset_set(affinity, affinity_to);
   6153 		error = interrupt_distribute(vih, affinity, NULL);
   6154 		if (error == 0) {
   6155 			aprint_normal_dev(sc->sc_dev,
   6156 			    "for TX and RX interrupting at %s affinity to %u\n",
   6157 			    intrstr, affinity_to);
   6158 		} else {
   6159 			aprint_normal_dev(sc->sc_dev,
   6160 			    "for TX and RX interrupting at %s\n", intrstr);
   6161 		}
   6162 		sc->sc_ihs[intr_idx] = vih;
   6163 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6164 			goto fail;
   6165 		txrx_established++;
   6166 		intr_idx++;
   6167 	}
   6168 
   6169 	/* LINK */
   6170 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6171 	    sizeof(intrbuf));
   6172 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6173 	memset(intr_xname, 0, sizeof(intr_xname));
   6174 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6175 	    device_xname(sc->sc_dev));
   6176 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6177 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6178 	if (vih == NULL) {
   6179 		aprint_error_dev(sc->sc_dev,
   6180 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6181 		    intrstr ? " at " : "",
   6182 		    intrstr ? intrstr : "");
   6183 
   6184 		goto fail;
   6185 	}
	/* Keep the default affinity for the LINK interrupt. */
   6187 	aprint_normal_dev(sc->sc_dev,
   6188 	    "for LINK interrupting at %s\n", intrstr);
   6189 	sc->sc_ihs[intr_idx] = vih;
   6190 	sc->sc_link_intr_idx = intr_idx;
   6191 
   6192 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6193 	kcpuset_destroy(affinity);
   6194 	return 0;
   6195 
   6196  fail:
   6197 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6198 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc,
		    sc->sc_ihs[wmq->wmq_intr_idx]);
   6200 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6201 	}
   6202 
   6203 	kcpuset_destroy(affinity);
   6204 	return ENOMEM;
   6205 }
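
/*
 * Resulting vector layout (a sketch, assuming sc_nqueues == 4 and
 * ncpu > 4): vectors 0-3 are the per-queue TXRX interrupts, bound
 * round-robin to CPU#1..CPU#4 (sc_affinity_offset == 1 keeps CPU#0
 * free for other devices), and vector 4 is the LINK interrupt, left
 * at the system default affinity.  sc_nintrs is therefore 5.
 */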
   6206 
   6207 static void
   6208 wm_unset_stopping_flags(struct wm_softc *sc)
   6209 {
   6210 	int i;
   6211 
   6212 	KASSERT(mutex_owned(sc->sc_core_lock));
   6213 
   6214 	/* Must unset stopping flags in ascending order. */
   6215 	for (i = 0; i < sc->sc_nqueues; i++) {
   6216 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6217 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6218 
   6219 		mutex_enter(txq->txq_lock);
   6220 		txq->txq_stopping = false;
   6221 		mutex_exit(txq->txq_lock);
   6222 
   6223 		mutex_enter(rxq->rxq_lock);
   6224 		rxq->rxq_stopping = false;
   6225 		mutex_exit(rxq->rxq_lock);
   6226 	}
   6227 
   6228 	sc->sc_core_stopping = false;
   6229 }
   6230 
   6231 static void
   6232 wm_set_stopping_flags(struct wm_softc *sc)
   6233 {
   6234 	int i;
   6235 
   6236 	KASSERT(mutex_owned(sc->sc_core_lock));
   6237 
   6238 	sc->sc_core_stopping = true;
   6239 
   6240 	/* Must set stopping flags in ascending order. */
   6241 	for (i = 0; i < sc->sc_nqueues; i++) {
   6242 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6243 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6244 
   6245 		mutex_enter(rxq->rxq_lock);
   6246 		rxq->rxq_stopping = true;
   6247 		mutex_exit(rxq->rxq_lock);
   6248 
   6249 		mutex_enter(txq->txq_lock);
   6250 		txq->txq_stopping = true;
   6251 		mutex_exit(txq->txq_lock);
   6252 	}
   6253 }
   6254 
   6255 /*
   6256  * Write interrupt interval value to ITR or EITR
   6257  */
   6258 static void
   6259 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6260 {
   6261 
   6262 	if (!wmq->wmq_set_itr)
   6263 		return;
   6264 
   6265 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6266 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6267 
		/*
		 * The 82575 doesn't have a CNT_INGR field, so the counter
		 * field is overwritten by software instead.
		 */
   6272 		if (sc->sc_type == WM_T_82575)
   6273 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6274 		else
   6275 			eitr |= EITR_CNT_INGR;
   6276 
   6277 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6278 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
		/*
		 * The 82574 has both ITR and EITR.  Set EITR when using
		 * the multiqueue feature with MSI-X.
		 */
   6283 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6284 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6285 	} else {
   6286 		KASSERT(wmq->wmq_id == 0);
   6287 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6288 	}
   6289 
   6290 	wmq->wmq_set_itr = false;
   6291 }
   6292 
/*
 * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's,
 * but it does not fit wm(4) well, so AIM stays disabled until we find
 * an appropriate ITR calculation.
 */
/*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register.  This function does not write the ITR/EITR
 * register itself.
 */
   6303 static void
   6304 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6305 {
   6306 #ifdef NOTYET
   6307 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6308 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6309 	uint32_t avg_size = 0;
   6310 	uint32_t new_itr;
   6311 
   6312 	if (rxq->rxq_packets)
   6313 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6314 	if (txq->txq_packets)
   6315 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6316 
   6317 	if (avg_size == 0) {
   6318 		new_itr = 450; /* restore default value */
   6319 		goto out;
   6320 	}
   6321 
   6322 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6323 	avg_size += 24;
   6324 
   6325 	/* Don't starve jumbo frames */
   6326 	avg_size = uimin(avg_size, 3000);
   6327 
   6328 	/* Give a little boost to mid-size frames */
   6329 	if ((avg_size > 300) && (avg_size < 1200))
   6330 		new_itr = avg_size / 3;
   6331 	else
   6332 		new_itr = avg_size / 2;
   6333 
   6334 out:
   6335 	/*
	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
	 * controllers.  See the sc->sc_itr_init setting in wm_init_locked().
   6338 	 */
   6339 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6340 		new_itr *= 4;
   6341 
   6342 	if (new_itr != wmq->wmq_itr) {
   6343 		wmq->wmq_itr = new_itr;
   6344 		wmq->wmq_set_itr = true;
   6345 	} else
   6346 		wmq->wmq_set_itr = false;
   6347 
   6348 	rxq->rxq_packets = 0;
   6349 	rxq->rxq_bytes = 0;
   6350 	txq->txq_packets = 0;
   6351 	txq->txq_bytes = 0;
   6352 #endif
   6353 }
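
/*
 * Illustrative trace of the disabled AIM calculation above: with an
 * average packet size of 576 bytes, avg_size becomes 576 + 24 = 600,
 * which is in the mid-size range (300 < 600 < 1200), so
 * new_itr = 600 / 3 = 200; on every controller except the 82575 the
 * value is then scaled by 4 to 800 before being latched for
 * wm_itrs_writereg().
 */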
   6354 
   6355 static void
   6356 wm_init_sysctls(struct wm_softc *sc)
   6357 {
   6358 	struct sysctllog **log;
   6359 	const struct sysctlnode *rnode, *qnode, *cnode;
   6360 	int i, rv;
   6361 	const char *dvname;
   6362 
   6363 	log = &sc->sc_sysctllog;
   6364 	dvname = device_xname(sc->sc_dev);
   6365 
   6366 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6367 	    0, CTLTYPE_NODE, dvname,
   6368 	    SYSCTL_DESCR("wm information and settings"),
   6369 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6370 	if (rv != 0)
   6371 		goto err;
   6372 
	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6376 	if (rv != 0)
   6377 		goto teardown;
   6378 
   6379 	for (i = 0; i < sc->sc_nqueues; i++) {
   6380 		struct wm_queue *wmq = &sc->sc_queue[i];
   6381 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6382 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6383 
   6384 		snprintf(sc->sc_queue[i].sysctlname,
   6385 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6386 
   6387 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6388 		    0, CTLTYPE_NODE,
   6389 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6390 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6391 			break;
   6392 
   6393 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6394 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6395 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6396 		    NULL, 0, &txq->txq_free,
   6397 		    0, CTL_CREATE, CTL_EOL) != 0)
   6398 			break;
   6399 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6400 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6401 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6402 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6403 		    0, CTL_CREATE, CTL_EOL) != 0)
   6404 			break;
   6405 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6406 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6407 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6408 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6409 		    0, CTL_CREATE, CTL_EOL) != 0)
   6410 			break;
   6411 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6412 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6413 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6414 		    NULL, 0, &txq->txq_next,
   6415 		    0, CTL_CREATE, CTL_EOL) != 0)
   6416 			break;
   6417 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6418 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6419 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6420 		    NULL, 0, &txq->txq_sfree,
   6421 		    0, CTL_CREATE, CTL_EOL) != 0)
   6422 			break;
   6423 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6424 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6425 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6426 		    NULL, 0, &txq->txq_snext,
   6427 		    0, CTL_CREATE, CTL_EOL) != 0)
   6428 			break;
   6429 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6430 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6431 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6432 		    NULL, 0, &txq->txq_sdirty,
   6433 		    0, CTL_CREATE, CTL_EOL) != 0)
   6434 			break;
   6435 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6436 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6437 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6438 		    NULL, 0, &txq->txq_flags,
   6439 		    0, CTL_CREATE, CTL_EOL) != 0)
   6440 			break;
   6441 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6442 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6443 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6444 		    NULL, 0, &txq->txq_stopping,
   6445 		    0, CTL_CREATE, CTL_EOL) != 0)
   6446 			break;
   6447 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6448 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6449 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6450 		    NULL, 0, &txq->txq_sending,
   6451 		    0, CTL_CREATE, CTL_EOL) != 0)
   6452 			break;
   6453 
   6454 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6455 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6456 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6457 		    NULL, 0, &rxq->rxq_ptr,
   6458 		    0, CTL_CREATE, CTL_EOL) != 0)
   6459 			break;
   6460 	}
   6461 
   6462 #ifdef WM_DEBUG
   6463 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6464 	    CTLTYPE_INT, "debug_flags",
   6465 	    SYSCTL_DESCR(
   6466 		    "Debug flags:\n"	\
   6467 		    "\t0x01 LINK\n"	\
   6468 		    "\t0x02 TX\n"	\
   6469 		    "\t0x04 RX\n"	\
   6470 		    "\t0x08 GMII\n"	\
   6471 		    "\t0x10 MANAGE\n"	\
   6472 		    "\t0x20 NVM\n"	\
   6473 		    "\t0x40 INIT\n"	\
   6474 		    "\t0x80 LOCK"),
   6475 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6476 	if (rv != 0)
   6477 		goto teardown;
   6478 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6479 	    CTLTYPE_BOOL, "trigger_reset",
   6480 	    SYSCTL_DESCR("Trigger an interface reset"),
   6481 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6482 	if (rv != 0)
   6483 		goto teardown;
   6484 #endif
   6485 
   6486 	return;
   6487 
   6488 teardown:
   6489 	sysctl_teardown(log);
   6490 err:
   6491 	sc->sc_sysctllog = NULL;
   6492 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6493 	    __func__, rv);
   6494 }
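
/*
 * The nodes created above live under hw.<ifname>; for example (assuming
 * the first instance attaches as wm0):
 *
 *	sysctl -w hw.wm0.txrx_workqueue=1	# use a workqueue for TX/RX
 *	sysctl hw.wm0.q0.txq_free		# free TX descriptors, queue 0
 */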
   6495 
   6496 /*
   6497  * wm_init:		[ifnet interface function]
   6498  *
   6499  *	Initialize the interface.
   6500  */
   6501 static int
   6502 wm_init(struct ifnet *ifp)
   6503 {
   6504 	struct wm_softc *sc = ifp->if_softc;
   6505 	int ret;
   6506 
   6507 	KASSERT(IFNET_LOCKED(ifp));
   6508 
   6509 	if (sc->sc_dying)
   6510 		return ENXIO;
   6511 
   6512 	mutex_enter(sc->sc_core_lock);
   6513 	ret = wm_init_locked(ifp);
   6514 	mutex_exit(sc->sc_core_lock);
   6515 
   6516 	return ret;
   6517 }
   6518 
   6519 static int
   6520 wm_init_locked(struct ifnet *ifp)
   6521 {
   6522 	struct wm_softc *sc = ifp->if_softc;
   6523 	struct ethercom *ec = &sc->sc_ethercom;
   6524 	int i, j, trynum, error = 0;
   6525 	uint32_t reg, sfp_mask = 0;
   6526 
   6527 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6528 		device_xname(sc->sc_dev), __func__));
   6529 	KASSERT(IFNET_LOCKED(ifp));
   6530 	KASSERT(mutex_owned(sc->sc_core_lock));
   6531 
   6532 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
   6536 	 * on such platforms.  One possibility is that the DMA itself is
   6537 	 * slightly more efficient if the front of the entire packet (instead
   6538 	 * of the front of the headers) is aligned.
   6539 	 *
   6540 	 * Note we must always set align_tweak to 0 if we are using
   6541 	 * jumbo frames.
   6542 	 */
   6543 #ifdef __NO_STRICT_ALIGNMENT
   6544 	sc->sc_align_tweak = 0;
   6545 #else
   6546 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6547 		sc->sc_align_tweak = 0;
   6548 	else
   6549 		sc->sc_align_tweak = 2;
   6550 #endif /* __NO_STRICT_ALIGNMENT */
   6551 
   6552 	/* Cancel any pending I/O. */
   6553 	wm_stop_locked(ifp, false, false);
   6554 
   6555 	/* Update statistics before reset */
   6556 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6557 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6558 
   6559 	/* >= PCH_SPT hardware workaround before reset. */
   6560 	if (sc->sc_type >= WM_T_PCH_SPT)
   6561 		wm_flush_desc_rings(sc);
   6562 
   6563 	/* Reset the chip to a known state. */
   6564 	wm_reset(sc);
   6565 
   6566 	/*
   6567 	 * AMT based hardware can now take control from firmware
   6568 	 * Do this after reset.
   6569 	 */
   6570 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6571 		wm_get_hw_control(sc);
   6572 
   6573 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6574 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6575 		wm_legacy_irq_quirk_spt(sc);
   6576 
   6577 	/* Init hardware bits */
   6578 	wm_initialize_hardware_bits(sc);
   6579 
   6580 	/* Reset the PHY. */
   6581 	if (sc->sc_flags & WM_F_HAS_MII)
   6582 		wm_gmii_reset(sc);
   6583 
   6584 	if (sc->sc_type >= WM_T_ICH8) {
   6585 		reg = CSR_READ(sc, WMREG_GCR);
   6586 		/*
   6587 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6588 		 * default after reset.
   6589 		 */
   6590 		if (sc->sc_type == WM_T_ICH8)
   6591 			reg |= GCR_NO_SNOOP_ALL;
   6592 		else
   6593 			reg &= ~GCR_NO_SNOOP_ALL;
   6594 		CSR_WRITE(sc, WMREG_GCR, reg);
   6595 	}
   6596 
   6597 	if ((sc->sc_type >= WM_T_ICH8)
   6598 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6599 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6601 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6602 		reg |= CTRL_EXT_RO_DIS;
   6603 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6604 	}
   6605 
   6606 	/* Calculate (E)ITR value */
   6607 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * For NEWQUEUE's EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as other old controllers' ITR because the
		 * interrupts/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as its ITR.
		 *
		 * For N interrupts/sec, set this value to 1,000,000 / N,
		 * in contrast to the ITR throttling value.
		 */
   6619 		sc->sc_itr_init = 450;
   6620 	} else if (sc->sc_type >= WM_T_82543) {
   6621 		/*
   6622 		 * Set up the interrupt throttling register (units of 256ns)
   6623 		 * Note that a footnote in Intel's documentation says this
   6624 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6625 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6626 		 * that that is also true for the 1024ns units of the other
   6627 		 * interrupt-related timer registers -- so, really, we ought
   6628 		 * to divide this value by 4 when the link speed is low.
   6629 		 *
   6630 		 * XXX implement this division at link speed change!
   6631 		 */
   6632 
   6633 		/*
   6634 		 * For N interrupts/sec, set this value to:
   6635 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6636 		 * absolute and packet timer values to this value
   6637 		 * divided by 4 to get "simple timer" behavior.
   6638 		 */
   6639 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6640 	}
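
	/*
	 * Worked arithmetic for the defaults above (illustrative): the
	 * legacy ITR value 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec, while the NEWQUEUE EITR default of 450 gives
	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */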
   6641 
   6642 	error = wm_init_txrx_queues(sc);
   6643 	if (error)
   6644 		goto out;
   6645 
   6646 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6647 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6648 	    (sc->sc_type >= WM_T_82575))
   6649 		wm_serdes_power_up_link_82575(sc);
   6650 
   6651 	/* Clear out the VLAN table -- we don't use it (yet). */
   6652 	CSR_WRITE(sc, WMREG_VET, 0);
   6653 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6654 		trynum = 10; /* Due to hw errata */
   6655 	else
   6656 		trynum = 1;
   6657 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6658 		for (j = 0; j < trynum; j++)
   6659 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6660 
   6661 	/*
   6662 	 * Set up flow-control parameters.
   6663 	 *
   6664 	 * XXX Values could probably stand some tuning.
   6665 	 */
   6666 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6667 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6668 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6669 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6670 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6671 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6672 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6673 	}
   6674 
   6675 	sc->sc_fcrtl = FCRTL_DFLT;
   6676 	if (sc->sc_type < WM_T_82543) {
   6677 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6678 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6679 	} else {
   6680 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6681 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6682 	}
   6683 
   6684 	if (sc->sc_type == WM_T_80003)
   6685 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6686 	else
   6687 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6688 
   6689 	/* Writes the control register. */
   6690 	wm_set_vlan(sc);
   6691 
   6692 	if (sc->sc_flags & WM_F_HAS_MII) {
   6693 		uint16_t kmreg;
   6694 
   6695 		switch (sc->sc_type) {
   6696 		case WM_T_80003:
   6697 		case WM_T_ICH8:
   6698 		case WM_T_ICH9:
   6699 		case WM_T_ICH10:
   6700 		case WM_T_PCH:
   6701 		case WM_T_PCH2:
   6702 		case WM_T_PCH_LPT:
   6703 		case WM_T_PCH_SPT:
   6704 		case WM_T_PCH_CNP:
   6705 			/*
   6706 			 * Set the mac to wait the maximum time between each
   6707 			 * iteration and increase the max iterations when
   6708 			 * polling the phy; this fixes erroneous timeouts at
   6709 			 * 10Mbps.
   6710 			 */
   6711 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6712 			    0xFFFF);
   6713 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6714 			    &kmreg);
   6715 			kmreg |= 0x3F;
   6716 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6717 			    kmreg);
   6718 			break;
   6719 		default:
   6720 			break;
   6721 		}
   6722 
   6723 		if (sc->sc_type == WM_T_80003) {
   6724 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6725 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6726 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6727 
   6728 			/* Bypass RX and TX FIFOs */
   6729 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6730 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6731 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6732 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6733 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6734 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6735 		}
   6736 	}
   6737 #if 0
   6738 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6739 #endif
   6740 
   6741 	/* Set up checksum offload parameters. */
   6742 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6743 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6744 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6745 		reg |= RXCSUM_IPOFL;
   6746 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6747 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6748 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6749 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6750 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6751 
   6752 	/* Set registers about MSI-X */
   6753 	if (wm_is_using_msix(sc)) {
   6754 		uint32_t ivar, qintr_idx;
   6755 		struct wm_queue *wmq;
   6756 		unsigned int qid;
   6757 
   6758 		if (sc->sc_type == WM_T_82575) {
   6759 			/* Interrupt control */
   6760 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6761 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6762 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6763 
   6764 			/* TX and RX */
   6765 			for (i = 0; i < sc->sc_nqueues; i++) {
   6766 				wmq = &sc->sc_queue[i];
   6767 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6768 				    EITR_TX_QUEUE(wmq->wmq_id)
   6769 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6770 			}
   6771 			/* Link status */
   6772 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6773 			    EITR_OTHER);
   6774 		} else if (sc->sc_type == WM_T_82574) {
   6775 			/* Interrupt control */
   6776 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6777 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6778 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6779 
   6780 			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
   6785 			 */
   6786 			reg = CSR_READ(sc, WMREG_RFCTL);
   6787 			reg |= WMREG_RFCTL_ACKDIS;
   6788 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6789 
   6790 			ivar = 0;
   6791 			/* TX and RX */
   6792 			for (i = 0; i < sc->sc_nqueues; i++) {
   6793 				wmq = &sc->sc_queue[i];
   6794 				qid = wmq->wmq_id;
   6795 				qintr_idx = wmq->wmq_intr_idx;
   6796 
   6797 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6798 				    IVAR_TX_MASK_Q_82574(qid));
   6799 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6800 				    IVAR_RX_MASK_Q_82574(qid));
   6801 			}
   6802 			/* Link status */
   6803 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6804 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6805 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6806 		} else {
   6807 			/* Interrupt control */
   6808 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6809 			    | GPIE_EIAME | GPIE_PBA);
   6810 
   6811 			switch (sc->sc_type) {
   6812 			case WM_T_82580:
   6813 			case WM_T_I350:
   6814 			case WM_T_I354:
   6815 			case WM_T_I210:
   6816 			case WM_T_I211:
   6817 				/* TX and RX */
   6818 				for (i = 0; i < sc->sc_nqueues; i++) {
   6819 					wmq = &sc->sc_queue[i];
   6820 					qid = wmq->wmq_id;
   6821 					qintr_idx = wmq->wmq_intr_idx;
   6822 
   6823 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6824 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6825 					ivar |= __SHIFTIN((qintr_idx
   6826 						| IVAR_VALID),
   6827 					    IVAR_TX_MASK_Q(qid));
   6828 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6829 					ivar |= __SHIFTIN((qintr_idx
   6830 						| IVAR_VALID),
   6831 					    IVAR_RX_MASK_Q(qid));
   6832 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6833 				}
   6834 				break;
   6835 			case WM_T_82576:
   6836 				/* TX and RX */
   6837 				for (i = 0; i < sc->sc_nqueues; i++) {
   6838 					wmq = &sc->sc_queue[i];
   6839 					qid = wmq->wmq_id;
   6840 					qintr_idx = wmq->wmq_intr_idx;
   6841 
   6842 					ivar = CSR_READ(sc,
   6843 					    WMREG_IVAR_Q_82576(qid));
   6844 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6845 					ivar |= __SHIFTIN((qintr_idx
   6846 						| IVAR_VALID),
   6847 					    IVAR_TX_MASK_Q_82576(qid));
   6848 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6849 					ivar |= __SHIFTIN((qintr_idx
   6850 						| IVAR_VALID),
   6851 					    IVAR_RX_MASK_Q_82576(qid));
   6852 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6853 					    ivar);
   6854 				}
   6855 				break;
   6856 			default:
   6857 				break;
   6858 			}
   6859 
   6860 			/* Link status */
   6861 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6862 			    IVAR_MISC_OTHER);
   6863 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6864 		}
   6865 
   6866 		if (wm_is_using_multiqueue(sc)) {
   6867 			wm_init_rss(sc);
   6868 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with multiqueue; however,
			 * this is not the same as TCP/IP checksum offload,
			 * which still works.
			 */
   6875 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6876 			reg |= RXCSUM_PCSD;
   6877 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6878 		}
   6879 	}
   6880 
   6881 	/* Set up the interrupt registers. */
   6882 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6883 
   6884 	/* Enable SFP module insertion interrupt if it's required */
   6885 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6886 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6887 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6888 		sfp_mask = ICR_GPI(0);
   6889 	}
   6890 
   6891 	if (wm_is_using_msix(sc)) {
   6892 		uint32_t mask;
   6893 		struct wm_queue *wmq;
   6894 
   6895 		switch (sc->sc_type) {
   6896 		case WM_T_82574:
   6897 			mask = 0;
   6898 			for (i = 0; i < sc->sc_nqueues; i++) {
   6899 				wmq = &sc->sc_queue[i];
   6900 				mask |= ICR_TXQ(wmq->wmq_id);
   6901 				mask |= ICR_RXQ(wmq->wmq_id);
   6902 			}
   6903 			mask |= ICR_OTHER;
   6904 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6905 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6906 			break;
   6907 		default:
   6908 			if (sc->sc_type == WM_T_82575) {
   6909 				mask = 0;
   6910 				for (i = 0; i < sc->sc_nqueues; i++) {
   6911 					wmq = &sc->sc_queue[i];
   6912 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6913 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6914 				}
   6915 				mask |= EITR_OTHER;
   6916 			} else {
   6917 				mask = 0;
   6918 				for (i = 0; i < sc->sc_nqueues; i++) {
   6919 					wmq = &sc->sc_queue[i];
   6920 					mask |= 1 << wmq->wmq_intr_idx;
   6921 				}
   6922 				mask |= 1 << sc->sc_link_intr_idx;
   6923 			}
   6924 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6925 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6926 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6927 
   6928 			/* For other interrupts */
   6929 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6930 			break;
   6931 		}
   6932 	} else {
   6933 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6934 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6935 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6936 	}
   6937 
   6938 	/* Set up the inter-packet gap. */
   6939 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6940 
   6941 	if (sc->sc_type >= WM_T_82543) {
   6942 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6943 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6944 			wm_itrs_writereg(sc, wmq);
   6945 		}
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
		 */
   6952 	}
   6953 
   6954 	/* Set the VLAN EtherType. */
   6955 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6956 
   6957 	/*
   6958 	 * Set up the transmit control register; we start out with
   6959 	 * a collision distance suitable for FDX, but update it when
   6960 	 * we resolve the media type.
   6961 	 */
   6962 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6963 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6964 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6965 	if (sc->sc_type >= WM_T_82571)
   6966 		sc->sc_tctl |= TCTL_MULR;
   6967 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6968 
   6969 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set.  See the documentation. */
   6971 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6972 	}
   6973 
   6974 	if (sc->sc_type == WM_T_80003) {
   6975 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6976 		reg &= ~TCTL_EXT_GCEX_MASK;
   6977 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6978 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6979 	}
   6980 
   6981 	/* Set the media. */
   6982 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6983 		goto out;
   6984 
   6985 	/* Configure for OS presence */
   6986 	wm_init_manageability(sc);
   6987 
   6988 	/*
   6989 	 * Set up the receive control register; we actually program the
   6990 	 * register when we set the receive filter. Use multicast address
   6991 	 * offset type 0.
   6992 	 *
   6993 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6994 	 * don't enable that feature.
   6995 	 */
   6996 	sc->sc_mchash_type = 0;
   6997 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6998 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6999 
	/* The 82574 uses the one-buffer extended Rx descriptor. */
   7001 	if (sc->sc_type == WM_T_82574)
   7002 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7003 
   7004 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7005 		sc->sc_rctl |= RCTL_SECRC;
   7006 
   7007 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7008 	    && (ifp->if_mtu > ETHERMTU)) {
   7009 		sc->sc_rctl |= RCTL_LPE;
   7010 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7011 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7012 	}
   7013 
   7014 	if (MCLBYTES == 2048)
   7015 		sc->sc_rctl |= RCTL_2k;
   7016 	else {
   7017 		if (sc->sc_type >= WM_T_82543) {
   7018 			switch (MCLBYTES) {
   7019 			case 4096:
   7020 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7021 				break;
   7022 			case 8192:
   7023 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7024 				break;
   7025 			case 16384:
   7026 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7027 				break;
   7028 			default:
   7029 				panic("wm_init: MCLBYTES %d unsupported",
   7030 				    MCLBYTES);
   7031 				break;
   7032 			}
   7033 		} else
   7034 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7035 	}
   7036 
   7037 	/* Enable ECC */
   7038 	switch (sc->sc_type) {
   7039 	case WM_T_82571:
   7040 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7041 		reg |= PBA_ECC_CORR_EN;
   7042 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7043 		break;
   7044 	case WM_T_PCH_LPT:
   7045 	case WM_T_PCH_SPT:
   7046 	case WM_T_PCH_CNP:
   7047 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7048 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7049 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7050 
   7051 		sc->sc_ctrl |= CTRL_MEHE;
   7052 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7053 		break;
   7054 	default:
   7055 		break;
   7056 	}
   7057 
   7058 	/*
   7059 	 * Set the receive filter.
   7060 	 *
   7061 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7062 	 * the setting of RCTL.EN in wm_set_filter()
   7063 	 */
   7064 	wm_set_filter(sc);
   7065 
	/* On the 82575 and later, set RDT only if RX is enabled. */
   7067 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7068 		int qidx;
   7069 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7070 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7071 			for (i = 0; i < WM_NRXDESC; i++) {
   7072 				mutex_enter(rxq->rxq_lock);
   7073 				wm_init_rxdesc(rxq, i);
   7074 				mutex_exit(rxq->rxq_lock);
			}
   7077 		}
   7078 	}
   7079 
   7080 	wm_unset_stopping_flags(sc);
   7081 
   7082 	/* Start the one second link check clock. */
   7083 	callout_schedule(&sc->sc_tick_ch, hz);
   7084 
   7085 	/*
   7086 	 * ...all done! (IFNET_LOCKED asserted above.)
   7087 	 */
   7088 	ifp->if_flags |= IFF_RUNNING;
   7089 
   7090  out:
   7091 	/* Save last flags for the callback */
   7092 	sc->sc_if_flags = ifp->if_flags;
   7093 	sc->sc_ec_capenable = ec->ec_capenable;
   7094 	if (error)
   7095 		log(LOG_ERR, "%s: interface not running\n",
   7096 		    device_xname(sc->sc_dev));
   7097 	return error;
   7098 }
   7099 
   7100 /*
   7101  * wm_stop:		[ifnet interface function]
   7102  *
   7103  *	Stop transmission on the interface.
   7104  */
   7105 static void
   7106 wm_stop(struct ifnet *ifp, int disable)
   7107 {
   7108 	struct wm_softc *sc = ifp->if_softc;
   7109 
   7110 	ASSERT_SLEEPABLE();
   7111 	KASSERT(IFNET_LOCKED(ifp));
   7112 
   7113 	mutex_enter(sc->sc_core_lock);
   7114 	wm_stop_locked(ifp, disable ? true : false, true);
   7115 	mutex_exit(sc->sc_core_lock);
   7116 
   7117 	/*
   7118 	 * After wm_set_stopping_flags(), it is guaranteed that
   7119 	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
   7123 	 */
   7124 	for (int i = 0; i < sc->sc_nqueues; i++)
   7125 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7126 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7127 }
   7128 
   7129 static void
   7130 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7131 {
   7132 	struct wm_softc *sc = ifp->if_softc;
   7133 	struct wm_txsoft *txs;
   7134 	int i, qidx;
   7135 
   7136 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7137 		device_xname(sc->sc_dev), __func__));
   7138 	KASSERT(IFNET_LOCKED(ifp));
   7139 	KASSERT(mutex_owned(sc->sc_core_lock));
   7140 
   7141 	wm_set_stopping_flags(sc);
   7142 
   7143 	if (sc->sc_flags & WM_F_HAS_MII) {
   7144 		/* Down the MII. */
   7145 		mii_down(&sc->sc_mii);
   7146 	} else {
   7147 #if 0
   7148 		/* Should we clear PHY's status properly? */
   7149 		wm_reset(sc);
   7150 #endif
   7151 	}
   7152 
   7153 	/* Stop the transmit and receive processes. */
   7154 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7155 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7156 	sc->sc_rctl &= ~RCTL_EN;
   7157 
   7158 	/*
   7159 	 * Clear the interrupt mask to ensure the device cannot assert its
   7160 	 * interrupt line.
   7161 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7162 	 * service any currently pending or shared interrupt.
   7163 	 */
   7164 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7165 	sc->sc_icr = 0;
   7166 	if (wm_is_using_msix(sc)) {
   7167 		if (sc->sc_type != WM_T_82574) {
   7168 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7169 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7170 		} else
   7171 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7172 	}
   7173 
   7174 	/*
   7175 	 * Stop callouts after interrupts are disabled; if we have
   7176 	 * to wait for them, we will be releasing the CORE_LOCK
   7177 	 * briefly, which will unblock interrupts on the current CPU.
   7178 	 */
   7179 
   7180 	/* Stop the one second clock. */
   7181 	if (wait)
   7182 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7183 	else
   7184 		callout_stop(&sc->sc_tick_ch);
   7185 
   7186 	/* Stop the 82547 Tx FIFO stall check timer. */
   7187 	if (sc->sc_type == WM_T_82547) {
   7188 		if (wait)
   7189 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7190 		else
   7191 			callout_stop(&sc->sc_txfifo_ch);
   7192 	}
   7193 
   7194 	/* Release any queued transmit buffers. */
   7195 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7196 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7197 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7198 		struct mbuf *m;
   7199 
   7200 		mutex_enter(txq->txq_lock);
   7201 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7202 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7203 			txs = &txq->txq_soft[i];
   7204 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   7206 				m_freem(txs->txs_mbuf);
   7207 				txs->txs_mbuf = NULL;
   7208 			}
   7209 		}
   7210 		/* Drain txq_interq */
   7211 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7212 			m_freem(m);
   7213 		mutex_exit(txq->txq_lock);
   7214 	}
   7215 
   7216 	/* Mark the interface as down and cancel the watchdog timer. */
   7217 	ifp->if_flags &= ~IFF_RUNNING;
   7218 	sc->sc_if_flags = ifp->if_flags;
   7219 
   7220 	if (disable) {
   7221 		for (i = 0; i < sc->sc_nqueues; i++) {
   7222 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7223 			mutex_enter(rxq->rxq_lock);
   7224 			wm_rxdrain(rxq);
   7225 			mutex_exit(rxq->rxq_lock);
   7226 		}
   7227 	}
   7228 
   7229 #if 0 /* notyet */
   7230 	if (sc->sc_type >= WM_T_82544)
   7231 		CSR_WRITE(sc, WMREG_WUC, 0);
   7232 #endif
   7233 }
   7234 
   7235 static void
   7236 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7237 {
   7238 	struct mbuf *m;
   7239 	int i;
   7240 
   7241 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7242 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7243 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7244 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7245 		    m->m_data, m->m_len, m->m_flags);
   7246 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7247 	    i, i == 1 ? "" : "s");
   7248 }
   7249 
   7250 /*
   7251  * wm_82547_txfifo_stall:
   7252  *
   7253  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7254  *	reset the FIFO pointers, and restart packet transmission.
   7255  */
   7256 static void
   7257 wm_82547_txfifo_stall(void *arg)
   7258 {
   7259 	struct wm_softc *sc = arg;
   7260 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7261 
   7262 	mutex_enter(txq->txq_lock);
   7263 
   7264 	if (txq->txq_stopping)
   7265 		goto out;
   7266 
   7267 	if (txq->txq_fifo_stall) {
   7268 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7269 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7270 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7271 			/*
   7272 			 * Packets have drained.  Stop transmitter, reset
   7273 			 * FIFO pointers, restart transmitter, and kick
   7274 			 * the packet queue.
   7275 			 */
   7276 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7277 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7278 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7279 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7280 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7281 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7282 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7283 			CSR_WRITE_FLUSH(sc);
   7284 
   7285 			txq->txq_fifo_head = 0;
   7286 			txq->txq_fifo_stall = 0;
   7287 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7288 		} else {
   7289 			/*
   7290 			 * Still waiting for packets to drain; try again in
   7291 			 * another tick.
   7292 			 */
   7293 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7294 		}
   7295 	}
   7296 
   7297 out:
   7298 	mutex_exit(txq->txq_lock);
   7299 }
   7300 
   7301 /*
   7302  * wm_82547_txfifo_bugchk:
   7303  *
 *	Check for a bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7307  *
   7308  *	We do this by checking the amount of space before the end
   7309  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7310  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7311  *	the internal FIFO pointers to the beginning, and restart
   7312  *	transmission on the interface.
   7313  */
   7314 #define	WM_FIFO_HDR		0x10
   7315 #define	WM_82547_PAD_LEN	0x3e0
   7316 static int
   7317 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7318 {
   7319 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7320 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7321 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7322 
   7323 	/* Just return if already stalled. */
   7324 	if (txq->txq_fifo_stall)
   7325 		return 1;
   7326 
   7327 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7328 		/* Stall only occurs in half-duplex mode. */
   7329 		goto send_packet;
   7330 	}
   7331 
   7332 	if (len >= WM_82547_PAD_LEN + space) {
   7333 		txq->txq_fifo_stall = 1;
   7334 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7335 		return 1;
   7336 	}
   7337 
   7338  send_packet:
   7339 	txq->txq_fifo_head += len;
   7340 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7341 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7342 
   7343 	return 0;
   7344 }
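
/*
 * Worked example for the check above (illustrative; the FIFO size is a
 * made-up figure): assume a 0x4000 byte FIFO in half-duplex mode, with
 * txq_fifo_head at 0x3d00 (space = 0x300) and a 1000-byte packet, so
 * len = roundup(1000 + 0x10, 0x10) = 0x400.  Since 0x400 is less than
 * WM_82547_PAD_LEN + 0x300 (= 0x6e0), the packet is sent and the head
 * wraps to 0x4100 - 0x4000 = 0x100.  A packet with len >= 0x6e0 would
 * instead stall the FIFO until it drains.
 */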
   7345 
   7346 static int
   7347 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7348 {
   7349 	int error;
   7350 
   7351 	/*
   7352 	 * Allocate the control data structures, and create and load the
   7353 	 * DMA map for it.
   7354 	 *
   7355 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7356 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7357 	 * both sets within the same 4G segment.
   7358 	 */
   7359 	if (sc->sc_type < WM_T_82544)
   7360 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7361 	else
   7362 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7363 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7364 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7365 	else
   7366 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7367 
   7368 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7369 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7370 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7371 		aprint_error_dev(sc->sc_dev,
   7372 		    "unable to allocate TX control data, error = %d\n",
   7373 		    error);
   7374 		goto fail_0;
   7375 	}
   7376 
   7377 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7378 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7379 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7380 		aprint_error_dev(sc->sc_dev,
   7381 		    "unable to map TX control data, error = %d\n", error);
   7382 		goto fail_1;
   7383 	}
   7384 
   7385 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7386 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7387 		aprint_error_dev(sc->sc_dev,
   7388 		    "unable to create TX control data DMA map, error = %d\n",
   7389 		    error);
   7390 		goto fail_2;
   7391 	}
   7392 
   7393 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7394 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7395 		aprint_error_dev(sc->sc_dev,
   7396 		    "unable to load TX control data DMA map, error = %d\n",
   7397 		    error);
   7398 		goto fail_3;
   7399 	}
   7400 
   7401 	return 0;
   7402 
   7403  fail_3:
   7404 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7405  fail_2:
   7406 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7407 	    WM_TXDESCS_SIZE(txq));
   7408  fail_1:
   7409 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7410  fail_0:
   7411 	return error;
   7412 }
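
/*
 * Note (illustrative): the (bus_size_t)0x100000000ULL boundary passed to
 * bus_dmamem_alloc() above is what enforces the "same 4G segment"
 * requirement described in the NOTE at the top of this function; the
 * allocated segment can never cross a 4 GB boundary.
 */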
   7413 
   7414 static void
   7415 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7416 {
   7417 
   7418 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7419 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7420 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7421 	    WM_TXDESCS_SIZE(txq));
   7422 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7423 }
   7424 
   7425 static int
   7426 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7427 {
   7428 	int error;
   7429 	size_t rxq_descs_size;
   7430 
   7431 	/*
   7432 	 * Allocate the control data structures, and create and load the
   7433 	 * DMA map for it.
   7434 	 *
   7435 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7436 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7437 	 * both sets within the same 4G segment.
   7438 	 */
   7439 	rxq->rxq_ndesc = WM_NRXDESC;
   7440 	if (sc->sc_type == WM_T_82574)
   7441 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7442 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7443 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7444 	else
   7445 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7446 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7447 
   7448 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7449 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7450 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7451 		aprint_error_dev(sc->sc_dev,
   7452 		    "unable to allocate RX control data, error = %d\n",
   7453 		    error);
   7454 		goto fail_0;
   7455 	}
   7456 
   7457 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7458 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7459 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7460 		aprint_error_dev(sc->sc_dev,
   7461 		    "unable to map RX control data, error = %d\n", error);
   7462 		goto fail_1;
   7463 	}
   7464 
   7465 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7466 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7467 		aprint_error_dev(sc->sc_dev,
   7468 		    "unable to create RX control data DMA map, error = %d\n",
   7469 		    error);
   7470 		goto fail_2;
   7471 	}
   7472 
   7473 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7474 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7475 		aprint_error_dev(sc->sc_dev,
   7476 		    "unable to load RX control data DMA map, error = %d\n",
   7477 		    error);
   7478 		goto fail_3;
   7479 	}
   7480 
   7481 	return 0;
   7482 
   7483  fail_3:
   7484 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7485  fail_2:
   7486 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7487 	    rxq_descs_size);
   7488  fail_1:
   7489 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7490  fail_0:
   7491 	return error;
   7492 }
   7493 
   7494 static void
   7495 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7496 {
   7497 
   7498 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7499 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7500 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7501 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7502 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7503 }
   7504 
   7505 
   7506 static int
   7507 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7508 {
   7509 	int i, error;
   7510 
   7511 	/* Create the transmit buffer DMA maps. */
   7512 	WM_TXQUEUELEN(txq) =
   7513 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7514 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7515 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7516 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7517 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7518 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7519 			aprint_error_dev(sc->sc_dev,
   7520 			    "unable to create Tx DMA map %d, error = %d\n",
   7521 			    i, error);
   7522 			goto fail;
   7523 		}
   7524 	}
   7525 
   7526 	return 0;
   7527 
   7528  fail:
   7529 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7530 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7531 			bus_dmamap_destroy(sc->sc_dmat,
   7532 			    txq->txq_soft[i].txs_dmamap);
   7533 	}
   7534 	return error;
   7535 }
   7536 
   7537 static void
   7538 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7539 {
   7540 	int i;
   7541 
   7542 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7543 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7544 			bus_dmamap_destroy(sc->sc_dmat,
   7545 			    txq->txq_soft[i].txs_dmamap);
   7546 	}
   7547 }
   7548 
   7549 static int
   7550 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7551 {
   7552 	int i, error;
   7553 
   7554 	/* Create the receive buffer DMA maps. */
   7555 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7556 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7557 			    MCLBYTES, 0, 0,
   7558 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7559 			aprint_error_dev(sc->sc_dev,
   7560 			    "unable to create Rx DMA map %d error = %d\n",
   7561 			    i, error);
   7562 			goto fail;
   7563 		}
   7564 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7565 	}
   7566 
   7567 	return 0;
   7568 
   7569  fail:
   7570 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7571 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7572 			bus_dmamap_destroy(sc->sc_dmat,
   7573 			    rxq->rxq_soft[i].rxs_dmamap);
   7574 	}
   7575 	return error;
   7576 }
   7577 
   7578 static void
   7579 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7580 {
   7581 	int i;
   7582 
   7583 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7584 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7585 			bus_dmamap_destroy(sc->sc_dmat,
   7586 			    rxq->rxq_soft[i].rxs_dmamap);
   7587 	}
   7588 }
   7589 
   7590 /*
    7591  * wm_alloc_txrx_queues:
   7592  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7593  */
   7594 static int
   7595 wm_alloc_txrx_queues(struct wm_softc *sc)
   7596 {
   7597 	int i, error, tx_done, rx_done;
   7598 
   7599 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7600 	    KM_SLEEP);
   7601 	if (sc->sc_queue == NULL) {
    7602 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7603 		error = ENOMEM;
   7604 		goto fail_0;
   7605 	}
   7606 
   7607 	/* For transmission */
   7608 	error = 0;
   7609 	tx_done = 0;
   7610 	for (i = 0; i < sc->sc_nqueues; i++) {
   7611 #ifdef WM_EVENT_COUNTERS
   7612 		int j;
   7613 		const char *xname;
   7614 #endif
   7615 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7616 		txq->txq_sc = sc;
   7617 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7618 
   7619 		error = wm_alloc_tx_descs(sc, txq);
   7620 		if (error)
   7621 			break;
   7622 		error = wm_alloc_tx_buffer(sc, txq);
   7623 		if (error) {
   7624 			wm_free_tx_descs(sc, txq);
   7625 			break;
   7626 		}
   7627 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7628 		if (txq->txq_interq == NULL) {
   7629 			wm_free_tx_descs(sc, txq);
   7630 			wm_free_tx_buffer(sc, txq);
   7631 			error = ENOMEM;
   7632 			break;
   7633 		}
   7634 
   7635 #ifdef WM_EVENT_COUNTERS
   7636 		xname = device_xname(sc->sc_dev);
   7637 
   7638 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7639 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7640 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7641 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7642 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7643 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7644 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7645 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7646 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7647 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7648 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7649 
   7650 		for (j = 0; j < WM_NTXSEGS; j++) {
   7651 			snprintf(txq->txq_txseg_evcnt_names[j],
   7652 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7653 			    "txq%02dtxseg%d", i, j);
   7654 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7655 			    EVCNT_TYPE_MISC,
   7656 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7657 		}
   7658 
   7659 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7660 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7661 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7662 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7663 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7664 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7665 #endif /* WM_EVENT_COUNTERS */
   7666 
   7667 		tx_done++;
   7668 	}
   7669 	if (error)
   7670 		goto fail_1;
   7671 
   7672 	/* For receive */
   7673 	error = 0;
   7674 	rx_done = 0;
   7675 	for (i = 0; i < sc->sc_nqueues; i++) {
   7676 #ifdef WM_EVENT_COUNTERS
   7677 		const char *xname;
   7678 #endif
   7679 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7680 		rxq->rxq_sc = sc;
   7681 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7682 
   7683 		error = wm_alloc_rx_descs(sc, rxq);
   7684 		if (error)
   7685 			break;
   7686 
   7687 		error = wm_alloc_rx_buffer(sc, rxq);
   7688 		if (error) {
   7689 			wm_free_rx_descs(sc, rxq);
   7690 			break;
   7691 		}
   7692 
   7693 #ifdef WM_EVENT_COUNTERS
   7694 		xname = device_xname(sc->sc_dev);
   7695 
   7696 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7697 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7698 
   7699 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7700 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7701 #endif /* WM_EVENT_COUNTERS */
   7702 
   7703 		rx_done++;
   7704 	}
   7705 	if (error)
   7706 		goto fail_2;
   7707 
   7708 	return 0;
   7709 
   7710  fail_2:
   7711 	for (i = 0; i < rx_done; i++) {
   7712 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7713 		wm_free_rx_buffer(sc, rxq);
   7714 		wm_free_rx_descs(sc, rxq);
   7715 		if (rxq->rxq_lock)
   7716 			mutex_obj_free(rxq->rxq_lock);
   7717 	}
   7718  fail_1:
   7719 	for (i = 0; i < tx_done; i++) {
   7720 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7721 		pcq_destroy(txq->txq_interq);
   7722 		wm_free_tx_buffer(sc, txq);
   7723 		wm_free_tx_descs(sc, txq);
   7724 		if (txq->txq_lock)
   7725 			mutex_obj_free(txq->txq_lock);
   7726 	}
   7727 
   7728 	kmem_free(sc->sc_queue,
   7729 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7730  fail_0:
   7731 	return error;
   7732 }
   7733 
   7734 /*
    7735  * wm_free_txrx_queues:
   7736  *	Free {tx,rx}descs and {tx,rx} buffers
   7737  */
   7738 static void
   7739 wm_free_txrx_queues(struct wm_softc *sc)
   7740 {
   7741 	int i;
   7742 
   7743 	for (i = 0; i < sc->sc_nqueues; i++) {
   7744 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7745 
   7746 #ifdef WM_EVENT_COUNTERS
   7747 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7748 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7749 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7750 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7751 #endif /* WM_EVENT_COUNTERS */
   7752 
   7753 		wm_free_rx_buffer(sc, rxq);
   7754 		wm_free_rx_descs(sc, rxq);
   7755 		if (rxq->rxq_lock)
   7756 			mutex_obj_free(rxq->rxq_lock);
   7757 	}
   7758 
   7759 	for (i = 0; i < sc->sc_nqueues; i++) {
   7760 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7761 		struct mbuf *m;
   7762 #ifdef WM_EVENT_COUNTERS
   7763 		int j;
   7764 
   7765 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7766 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7767 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7768 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7769 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7770 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7771 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7772 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7773 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7774 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7775 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7776 
   7777 		for (j = 0; j < WM_NTXSEGS; j++)
   7778 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7779 
   7780 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7781 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7782 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7783 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7784 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7785 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7786 #endif /* WM_EVENT_COUNTERS */
   7787 
   7788 		/* Drain txq_interq */
   7789 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7790 			m_freem(m);
   7791 		pcq_destroy(txq->txq_interq);
   7792 
   7793 		wm_free_tx_buffer(sc, txq);
   7794 		wm_free_tx_descs(sc, txq);
   7795 		if (txq->txq_lock)
   7796 			mutex_obj_free(txq->txq_lock);
   7797 	}
   7798 
   7799 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7800 }
   7801 
   7802 static void
   7803 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7804 {
   7805 
   7806 	KASSERT(mutex_owned(txq->txq_lock));
   7807 
   7808 	/* Initialize the transmit descriptor ring. */
   7809 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7810 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7811 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7812 	txq->txq_free = WM_NTXDESC(txq);
   7813 	txq->txq_next = 0;
   7814 }
   7815 
   7816 static void
   7817 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7818     struct wm_txqueue *txq)
   7819 {
   7820 
   7821 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7822 		device_xname(sc->sc_dev), __func__));
   7823 	KASSERT(mutex_owned(txq->txq_lock));
   7824 
   7825 	if (sc->sc_type < WM_T_82543) {
   7826 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7827 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7828 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7829 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7830 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7831 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7832 	} else {
   7833 		int qid = wmq->wmq_id;
   7834 
   7835 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7836 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7837 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7838 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7839 
   7840 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    7841 			/*
    7842 			 * Don't write TDT before TCTL.EN is set.
    7843 			 * See the documentation.
    7844 			 */
   7845 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7846 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7847 			    | TXDCTL_WTHRESH(0));
   7848 		else {
   7849 			/* XXX should update with AIM? */
   7850 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7851 			if (sc->sc_type >= WM_T_82540) {
    7852 				/* TADV should hold the same value as TIDV */
   7853 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7854 			}
   7855 
   7856 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7857 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7858 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7859 		}
   7860 	}
   7861 }
   7862 
   7863 static void
   7864 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7865 {
   7866 	int i;
   7867 
   7868 	KASSERT(mutex_owned(txq->txq_lock));
   7869 
   7870 	/* Initialize the transmit job descriptors. */
   7871 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7872 		txq->txq_soft[i].txs_mbuf = NULL;
   7873 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7874 	txq->txq_snext = 0;
   7875 	txq->txq_sdirty = 0;
   7876 }
   7877 
   7878 static void
   7879 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7880     struct wm_txqueue *txq)
   7881 {
   7882 
   7883 	KASSERT(mutex_owned(txq->txq_lock));
   7884 
   7885 	/*
   7886 	 * Set up some register offsets that are different between
   7887 	 * the i82542 and the i82543 and later chips.
   7888 	 */
   7889 	if (sc->sc_type < WM_T_82543)
   7890 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7891 	else
   7892 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7893 
   7894 	wm_init_tx_descs(sc, txq);
   7895 	wm_init_tx_regs(sc, wmq, txq);
   7896 	wm_init_tx_buffer(sc, txq);
   7897 
    7898 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
   7899 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7900 
   7901 	txq->txq_sending = false;
   7902 }
   7903 
   7904 static void
   7905 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7906     struct wm_rxqueue *rxq)
   7907 {
   7908 
   7909 	KASSERT(mutex_owned(rxq->rxq_lock));
   7910 
   7911 	/*
   7912 	 * Initialize the receive descriptor and receive job
   7913 	 * descriptor rings.
   7914 	 */
   7915 	if (sc->sc_type < WM_T_82543) {
   7916 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7917 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7918 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7919 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7920 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7921 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7922 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7923 
   7924 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7925 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7926 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7927 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7928 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7929 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7930 	} else {
   7931 		int qid = wmq->wmq_id;
   7932 
   7933 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7934 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7935 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7936 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7937 
   7938 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7939 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7940 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7941 				    "or higher\n", __func__, MCLBYTES);
   7942 
    7943 			/*
    7944 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF
    7945 			 * is supported.
    7946 			 */
   7947 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7948 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7949 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
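         			/*
         			 * Illustrative example (values assumed, not
         			 * taken from this file): with the common
         			 * MCLBYTES of 2048 and SRRCTL expressing the
         			 * packet buffer size in 1 KB units, the size
         			 * field above is 2048 >> 10 = 2.
         			 */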
   7950 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7951 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7952 			    | RXDCTL_WTHRESH(1));
   7953 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7954 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7955 		} else {
   7956 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7957 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7958 			/* XXX should update with AIM? */
   7959 			CSR_WRITE(sc, WMREG_RDTR,
   7960 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7961 			/* RADV MUST hold the same value as RDTR */
   7962 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7963 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7964 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7965 		}
   7966 	}
   7967 }
   7968 
   7969 static int
   7970 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7971 {
   7972 	struct wm_rxsoft *rxs;
   7973 	int error, i;
   7974 
   7975 	KASSERT(mutex_owned(rxq->rxq_lock));
   7976 
   7977 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7978 		rxs = &rxq->rxq_soft[i];
   7979 		if (rxs->rxs_mbuf == NULL) {
   7980 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7981 				log(LOG_ERR, "%s: unable to allocate or map "
   7982 				    "rx buffer %d, error = %d\n",
   7983 				    device_xname(sc->sc_dev), i, error);
   7984 				/*
   7985 				 * XXX Should attempt to run with fewer receive
   7986 				 * XXX buffers instead of just failing.
   7987 				 */
   7988 				wm_rxdrain(rxq);
   7989 				return ENOMEM;
   7990 			}
   7991 		} else {
   7992 			/*
   7993 			 * For 82575 and 82576, the RX descriptors must be
   7994 			 * initialized after the setting of RCTL.EN in
   7995 			 * wm_set_filter()
   7996 			 */
   7997 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7998 				wm_init_rxdesc(rxq, i);
   7999 		}
   8000 	}
   8001 	rxq->rxq_ptr = 0;
   8002 	rxq->rxq_discard = 0;
   8003 	WM_RXCHAIN_RESET(rxq);
   8004 
   8005 	return 0;
   8006 }
   8007 
   8008 static int
   8009 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8010     struct wm_rxqueue *rxq)
   8011 {
   8012 
   8013 	KASSERT(mutex_owned(rxq->rxq_lock));
   8014 
   8015 	/*
   8016 	 * Set up some register offsets that are different between
   8017 	 * the i82542 and the i82543 and later chips.
   8018 	 */
   8019 	if (sc->sc_type < WM_T_82543)
   8020 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8021 	else
   8022 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8023 
   8024 	wm_init_rx_regs(sc, wmq, rxq);
   8025 	return wm_init_rx_buffer(sc, rxq);
   8026 }
   8027 
   8028 /*
    8029  * wm_init_txrx_queues:
   8030  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8031  */
   8032 static int
   8033 wm_init_txrx_queues(struct wm_softc *sc)
   8034 {
   8035 	int i, error = 0;
   8036 
   8037 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8038 		device_xname(sc->sc_dev), __func__));
   8039 
   8040 	for (i = 0; i < sc->sc_nqueues; i++) {
   8041 		struct wm_queue *wmq = &sc->sc_queue[i];
   8042 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8043 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8044 
    8045 		/*
    8046 		 * TODO:
    8047 		 * Currently, a constant value is used instead of AIM.
    8048 		 * Furthermore, the interrupt interval of multiqueue, which
    8049 		 * uses polling mode, is shorter than the default value.
    8050 		 * More tuning and AIM support are required.
    8051 		 */
   8052 		if (wm_is_using_multiqueue(sc))
   8053 			wmq->wmq_itr = 50;
   8054 		else
   8055 			wmq->wmq_itr = sc->sc_itr_init;
   8056 		wmq->wmq_set_itr = true;
   8057 
   8058 		mutex_enter(txq->txq_lock);
   8059 		wm_init_tx_queue(sc, wmq, txq);
   8060 		mutex_exit(txq->txq_lock);
   8061 
   8062 		mutex_enter(rxq->rxq_lock);
   8063 		error = wm_init_rx_queue(sc, wmq, rxq);
   8064 		mutex_exit(rxq->rxq_lock);
   8065 		if (error)
   8066 			break;
   8067 	}
   8068 
   8069 	return error;
   8070 }
   8071 
   8072 /*
   8073  * wm_tx_offload:
   8074  *
   8075  *	Set up TCP/IP checksumming parameters for the
   8076  *	specified packet.
   8077  */
   8078 static void
   8079 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8080     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8081 {
   8082 	struct mbuf *m0 = txs->txs_mbuf;
   8083 	struct livengood_tcpip_ctxdesc *t;
   8084 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8085 	uint32_t ipcse;
   8086 	struct ether_header *eh;
   8087 	int offset, iphl;
   8088 	uint8_t fields;
   8089 
   8090 	/*
   8091 	 * XXX It would be nice if the mbuf pkthdr had offset
   8092 	 * fields for the protocol headers.
   8093 	 */
   8094 
   8095 	eh = mtod(m0, struct ether_header *);
   8096 	switch (htons(eh->ether_type)) {
   8097 	case ETHERTYPE_IP:
   8098 	case ETHERTYPE_IPV6:
   8099 		offset = ETHER_HDR_LEN;
   8100 		break;
   8101 
   8102 	case ETHERTYPE_VLAN:
   8103 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8104 		break;
   8105 
   8106 	default:
   8107 		/* Don't support this protocol or encapsulation. */
   8108 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8109 		txq->txq_last_hw_ipcs = 0;
   8110 		txq->txq_last_hw_tucs = 0;
   8111 		*fieldsp = 0;
   8112 		*cmdp = 0;
   8113 		return;
   8114 	}
   8115 
   8116 	if ((m0->m_pkthdr.csum_flags &
   8117 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8118 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8119 	} else
   8120 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8121 
   8122 	ipcse = offset + iphl - 1;
   8123 
   8124 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8125 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8126 	seg = 0;
   8127 	fields = 0;
   8128 
   8129 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8130 		int hlen = offset + iphl;
   8131 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8132 
   8133 		if (__predict_false(m0->m_len <
   8134 				    (hlen + sizeof(struct tcphdr)))) {
   8135 			/*
   8136 			 * TCP/IP headers are not in the first mbuf; we need
   8137 			 * to do this the slow and painful way. Let's just
   8138 			 * hope this doesn't happen very often.
   8139 			 */
   8140 			struct tcphdr th;
   8141 
   8142 			WM_Q_EVCNT_INCR(txq, tsopain);
   8143 
   8144 			m_copydata(m0, hlen, sizeof(th), &th);
   8145 			if (v4) {
   8146 				struct ip ip;
   8147 
   8148 				m_copydata(m0, offset, sizeof(ip), &ip);
   8149 				ip.ip_len = 0;
   8150 				m_copyback(m0,
   8151 				    offset + offsetof(struct ip, ip_len),
   8152 				    sizeof(ip.ip_len), &ip.ip_len);
   8153 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8154 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8155 			} else {
   8156 				struct ip6_hdr ip6;
   8157 
   8158 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8159 				ip6.ip6_plen = 0;
   8160 				m_copyback(m0,
   8161 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8162 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8163 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8164 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8165 			}
   8166 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8167 			    sizeof(th.th_sum), &th.th_sum);
   8168 
   8169 			hlen += th.th_off << 2;
   8170 		} else {
   8171 			/*
   8172 			 * TCP/IP headers are in the first mbuf; we can do
   8173 			 * this the easy way.
   8174 			 */
   8175 			struct tcphdr *th;
   8176 
   8177 			if (v4) {
   8178 				struct ip *ip =
   8179 				    (void *)(mtod(m0, char *) + offset);
   8180 				th = (void *)(mtod(m0, char *) + hlen);
   8181 
   8182 				ip->ip_len = 0;
   8183 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8184 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8185 			} else {
   8186 				struct ip6_hdr *ip6 =
   8187 				    (void *)(mtod(m0, char *) + offset);
   8188 				th = (void *)(mtod(m0, char *) + hlen);
   8189 
   8190 				ip6->ip6_plen = 0;
   8191 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8192 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8193 			}
   8194 			hlen += th->th_off << 2;
   8195 		}
   8196 
   8197 		if (v4) {
   8198 			WM_Q_EVCNT_INCR(txq, tso);
   8199 			cmdlen |= WTX_TCPIP_CMD_IP;
   8200 		} else {
   8201 			WM_Q_EVCNT_INCR(txq, tso6);
   8202 			ipcse = 0;
   8203 		}
   8204 		cmd |= WTX_TCPIP_CMD_TSE;
   8205 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8206 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8207 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8208 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8209 	}
   8210 
   8211 	/*
   8212 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8213 	 * offload feature, if we load the context descriptor, we
   8214 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8215 	 */
   8216 
   8217 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8218 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8219 	    WTX_TCPIP_IPCSE(ipcse);
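         	/*
         	 * Illustrative example (assuming a standard untagged IPv4
         	 * frame with a 20-byte header): offset = ETHER_HDR_LEN (14),
         	 * so IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum)
         	 * = 24, and IPCSE = 14 + 20 - 1 = 33, the last byte of the
         	 * IP header.
         	 */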
   8220 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8221 		WM_Q_EVCNT_INCR(txq, ipsum);
   8222 		fields |= WTX_IXSM;
   8223 	}
   8224 
   8225 	offset += iphl;
   8226 
   8227 	if (m0->m_pkthdr.csum_flags &
   8228 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8229 		WM_Q_EVCNT_INCR(txq, tusum);
   8230 		fields |= WTX_TXSM;
   8231 		tucs = WTX_TCPIP_TUCSS(offset) |
   8232 		    WTX_TCPIP_TUCSO(offset +
   8233 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8234 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8235 	} else if ((m0->m_pkthdr.csum_flags &
   8236 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8237 		WM_Q_EVCNT_INCR(txq, tusum6);
   8238 		fields |= WTX_TXSM;
   8239 		tucs = WTX_TCPIP_TUCSS(offset) |
   8240 		    WTX_TCPIP_TUCSO(offset +
   8241 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8242 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8243 	} else {
   8244 		/* Just initialize it to a valid TCP context. */
   8245 		tucs = WTX_TCPIP_TUCSS(offset) |
   8246 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8247 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8248 	}
   8249 
   8250 	*cmdp = cmd;
   8251 	*fieldsp = fields;
   8252 
   8253 	/*
   8254 	 * We don't have to write context descriptor for every packet
   8255 	 * except for 82574. For 82574, we must write context descriptor
   8256 	 * for every packet when we use two descriptor queues.
   8257 	 *
   8258 	 * The 82574L can only remember the *last* context used
    8259 	 * regardless of the queue it was used for.  We cannot reuse
   8260 	 * contexts on this hardware platform and must generate a new
   8261 	 * context every time.  82574L hardware spec, section 7.2.6,
   8262 	 * second note.
   8263 	 */
   8264 	if (sc->sc_nqueues < 2) {
    8265 		/*
    8266 		 * Setting up a new checksum offload context for every
    8267 		 * frame takes a lot of processing time for the hardware.
    8268 		 * It also reduces performance a lot for small-sized
    8269 		 * frames, so avoid it if the driver can reuse a
    8270 		 * previously configured checksum offload context.
    8271 		 * For TSO, in theory we could use the same TSO context if
    8272 		 * the frame has the same type (IP/TCP) and the same MSS.
    8273 		 * However, checking whether a frame has the same IP/TCP
    8274 		 * structure is hard, so just ignore that and always
    8275 		 * establish a new TSO context.
    8276 		 */
   8277 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8278 		    == 0) {
   8279 			if (txq->txq_last_hw_cmd == cmd &&
   8280 			    txq->txq_last_hw_fields == fields &&
   8281 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8282 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8283 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8284 				return;
   8285 			}
   8286 		}
   8287 
   8288 		txq->txq_last_hw_cmd = cmd;
   8289 		txq->txq_last_hw_fields = fields;
   8290 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8291 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8292 	}
   8293 
   8294 	/* Fill in the context descriptor. */
   8295 	t = (struct livengood_tcpip_ctxdesc *)
   8296 	    &txq->txq_descs[txq->txq_next];
   8297 	t->tcpip_ipcs = htole32(ipcs);
   8298 	t->tcpip_tucs = htole32(tucs);
   8299 	t->tcpip_cmdlen = htole32(cmdlen);
   8300 	t->tcpip_seg = htole32(seg);
   8301 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8302 
   8303 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8304 	txs->txs_ndesc++;
   8305 }
   8306 
   8307 static inline int
   8308 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8309 {
   8310 	struct wm_softc *sc = ifp->if_softc;
   8311 	u_int cpuid = cpu_index(curcpu());
   8312 
    8313 	/*
    8314 	 * Currently, a simple distribution strategy.
    8315 	 * TODO:
    8316 	 * Distribute by flow ID (RSS hash value).
    8317 	 */
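         	/*
         	 * Illustrative example (values assumed): with ncpu = 8,
         	 * sc_affinity_offset = 2, sc_nqueues = 4 and cpuid = 1,
         	 * the result is ((1 + 8 - 2) % 8) % 4 = 7 % 4 = 3, so CPU 1
         	 * maps to queue 3. Adding ncpu before the modulo keeps the
         	 * sum non-negative when cpuid < sc_affinity_offset.
         	 */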
   8318 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8319 }
   8320 
   8321 static inline bool
   8322 wm_linkdown_discard(struct wm_txqueue *txq)
   8323 {
   8324 
   8325 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8326 		return true;
   8327 
   8328 	return false;
   8329 }
   8330 
   8331 /*
   8332  * wm_start:		[ifnet interface function]
   8333  *
   8334  *	Start packet transmission on the interface.
   8335  */
   8336 static void
   8337 wm_start(struct ifnet *ifp)
   8338 {
   8339 	struct wm_softc *sc = ifp->if_softc;
   8340 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8341 
   8342 	KASSERT(if_is_mpsafe(ifp));
   8343 	/*
   8344 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8345 	 */
   8346 
   8347 	mutex_enter(txq->txq_lock);
   8348 	if (!txq->txq_stopping)
   8349 		wm_start_locked(ifp);
   8350 	mutex_exit(txq->txq_lock);
   8351 }
   8352 
   8353 static void
   8354 wm_start_locked(struct ifnet *ifp)
   8355 {
   8356 	struct wm_softc *sc = ifp->if_softc;
   8357 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8358 
   8359 	wm_send_common_locked(ifp, txq, false);
   8360 }
   8361 
   8362 static int
   8363 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8364 {
   8365 	int qid;
   8366 	struct wm_softc *sc = ifp->if_softc;
   8367 	struct wm_txqueue *txq;
   8368 
   8369 	qid = wm_select_txqueue(ifp, m);
   8370 	txq = &sc->sc_queue[qid].wmq_txq;
   8371 
   8372 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8373 		m_freem(m);
   8374 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8375 		return ENOBUFS;
   8376 	}
   8377 
   8378 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8379 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8380 	if (m->m_flags & M_MCAST)
   8381 		if_statinc_ref(nsr, if_omcasts);
   8382 	IF_STAT_PUTREF(ifp);
   8383 
   8384 	if (mutex_tryenter(txq->txq_lock)) {
   8385 		if (!txq->txq_stopping)
   8386 			wm_transmit_locked(ifp, txq);
   8387 		mutex_exit(txq->txq_lock);
   8388 	}
   8389 
   8390 	return 0;
   8391 }
   8392 
   8393 static void
   8394 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8395 {
   8396 
   8397 	wm_send_common_locked(ifp, txq, true);
   8398 }
   8399 
   8400 static void
   8401 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8402     bool is_transmit)
   8403 {
   8404 	struct wm_softc *sc = ifp->if_softc;
   8405 	struct mbuf *m0;
   8406 	struct wm_txsoft *txs;
   8407 	bus_dmamap_t dmamap;
   8408 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8409 	bus_addr_t curaddr;
   8410 	bus_size_t seglen, curlen;
   8411 	uint32_t cksumcmd;
   8412 	uint8_t cksumfields;
   8413 	bool remap = true;
   8414 
   8415 	KASSERT(mutex_owned(txq->txq_lock));
   8416 	KASSERT(!txq->txq_stopping);
   8417 
   8418 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8419 		return;
   8420 
   8421 	if (__predict_false(wm_linkdown_discard(txq))) {
   8422 		do {
   8423 			if (is_transmit)
   8424 				m0 = pcq_get(txq->txq_interq);
   8425 			else
   8426 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    8427 			/*
    8428 			 * Increment the sent-packet counter as usual, even
    8429 			 * though the packet is discarded by the link-down PHY.
    8430 			 */
   8431 			if (m0 != NULL) {
   8432 				if_statinc(ifp, if_opackets);
   8433 				m_freem(m0);
   8434 			}
   8435 		} while (m0 != NULL);
   8436 		return;
   8437 	}
   8438 
   8439 	/* Remember the previous number of free descriptors. */
   8440 	ofree = txq->txq_free;
   8441 
   8442 	/*
   8443 	 * Loop through the send queue, setting up transmit descriptors
   8444 	 * until we drain the queue, or use up all available transmit
   8445 	 * descriptors.
   8446 	 */
   8447 	for (;;) {
   8448 		m0 = NULL;
   8449 
   8450 		/* Get a work queue entry. */
   8451 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8452 			wm_txeof(txq, UINT_MAX);
   8453 			if (txq->txq_sfree == 0) {
   8454 				DPRINTF(sc, WM_DEBUG_TX,
   8455 				    ("%s: TX: no free job descriptors\n",
   8456 					device_xname(sc->sc_dev)));
   8457 				WM_Q_EVCNT_INCR(txq, txsstall);
   8458 				break;
   8459 			}
   8460 		}
   8461 
   8462 		/* Grab a packet off the queue. */
   8463 		if (is_transmit)
   8464 			m0 = pcq_get(txq->txq_interq);
   8465 		else
   8466 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8467 		if (m0 == NULL)
   8468 			break;
   8469 
   8470 		DPRINTF(sc, WM_DEBUG_TX,
   8471 		    ("%s: TX: have packet to transmit: %p\n",
   8472 			device_xname(sc->sc_dev), m0));
   8473 
   8474 		txs = &txq->txq_soft[txq->txq_snext];
   8475 		dmamap = txs->txs_dmamap;
   8476 
   8477 		use_tso = (m0->m_pkthdr.csum_flags &
   8478 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8479 
   8480 		/*
   8481 		 * So says the Linux driver:
   8482 		 * The controller does a simple calculation to make sure
   8483 		 * there is enough room in the FIFO before initiating the
   8484 		 * DMA for each buffer. The calc is:
   8485 		 *	4 = ceil(buffer len / MSS)
   8486 		 * To make sure we don't overrun the FIFO, adjust the max
   8487 		 * buffer len if the MSS drops.
   8488 		 */
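         		/*
         		 * Illustrative example (MSS value assumed): the cap
         		 * below is min(4 * MSS, WTX_MAX_LEN); with a typical
         		 * TSO MSS of 1460 bytes, each DMA segment is limited
         		 * to at most 4 * 1460 = 5840 bytes (or WTX_MAX_LEN,
         		 * if that is smaller).
         		 */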
   8489 		dmamap->dm_maxsegsz =
   8490 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8491 		    ? m0->m_pkthdr.segsz << 2
   8492 		    : WTX_MAX_LEN;
   8493 
   8494 		/*
   8495 		 * Load the DMA map.  If this fails, the packet either
   8496 		 * didn't fit in the allotted number of segments, or we
   8497 		 * were short on resources.  For the too-many-segments
   8498 		 * case, we simply report an error and drop the packet,
   8499 		 * since we can't sanely copy a jumbo packet to a single
   8500 		 * buffer.
   8501 		 */
   8502 retry:
   8503 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8504 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8505 		if (__predict_false(error)) {
   8506 			if (error == EFBIG) {
   8507 				if (remap == true) {
   8508 					struct mbuf *m;
   8509 
   8510 					remap = false;
   8511 					m = m_defrag(m0, M_NOWAIT);
   8512 					if (m != NULL) {
   8513 						WM_Q_EVCNT_INCR(txq, defrag);
   8514 						m0 = m;
   8515 						goto retry;
   8516 					}
   8517 				}
   8518 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8519 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8520 				    "DMA segments, dropping...\n",
   8521 				    device_xname(sc->sc_dev));
   8522 				wm_dump_mbuf_chain(sc, m0);
   8523 				m_freem(m0);
   8524 				continue;
   8525 			}
   8526 			/* Short on resources, just stop for now. */
   8527 			DPRINTF(sc, WM_DEBUG_TX,
   8528 			    ("%s: TX: dmamap load failed: %d\n",
   8529 				device_xname(sc->sc_dev), error));
   8530 			break;
   8531 		}
   8532 
   8533 		segs_needed = dmamap->dm_nsegs;
   8534 		if (use_tso) {
   8535 			/* For sentinel descriptor; see below. */
   8536 			segs_needed++;
   8537 		}
   8538 
   8539 		/*
   8540 		 * Ensure we have enough descriptors free to describe
   8541 		 * the packet. Note, we always reserve one descriptor
   8542 		 * at the end of the ring due to the semantics of the
   8543 		 * TDT register, plus one more in the event we need
   8544 		 * to load offload context.
   8545 		 */
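         		/*
         		 * Illustrative example (numbers assumed): a packet
         		 * mapped to 5 DMA segments plus a TSO sentinel has
         		 * segs_needed = 6 and is accepted only while
         		 * txq_free >= 8, i.e. 6 plus the two reserved slots.
         		 */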
   8546 		if (segs_needed > txq->txq_free - 2) {
   8547 			/*
   8548 			 * Not enough free descriptors to transmit this
   8549 			 * packet.  We haven't committed anything yet,
   8550 			 * so just unload the DMA map, put the packet
    8551 			 * back on the queue, and punt. Notify the upper
   8552 			 * layer that there are no more slots left.
   8553 			 */
   8554 			DPRINTF(sc, WM_DEBUG_TX,
   8555 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8556 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8557 				segs_needed, txq->txq_free - 1));
   8558 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8559 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8560 			WM_Q_EVCNT_INCR(txq, txdstall);
   8561 			break;
   8562 		}
   8563 
   8564 		/*
   8565 		 * Check for 82547 Tx FIFO bug. We need to do this
   8566 		 * once we know we can transmit the packet, since we
   8567 		 * do some internal FIFO space accounting here.
   8568 		 */
   8569 		if (sc->sc_type == WM_T_82547 &&
   8570 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8571 			DPRINTF(sc, WM_DEBUG_TX,
   8572 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8573 				device_xname(sc->sc_dev)));
   8574 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8575 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8576 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8577 			break;
   8578 		}
   8579 
   8580 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8581 
   8582 		DPRINTF(sc, WM_DEBUG_TX,
   8583 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8584 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8585 
   8586 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8587 
   8588 		/*
   8589 		 * Store a pointer to the packet so that we can free it
   8590 		 * later.
   8591 		 *
   8592 		 * Initially, we consider the number of descriptors the
    8593 		 * packet uses to be the number of DMA segments.  This may be
   8594 		 * incremented by 1 if we do checksum offload (a descriptor
   8595 		 * is used to set the checksum context).
   8596 		 */
   8597 		txs->txs_mbuf = m0;
   8598 		txs->txs_firstdesc = txq->txq_next;
   8599 		txs->txs_ndesc = segs_needed;
   8600 
   8601 		/* Set up offload parameters for this packet. */
   8602 		if (m0->m_pkthdr.csum_flags &
   8603 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8604 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8605 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8606 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8607 		} else {
   8608 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8609 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8610 			cksumcmd = 0;
   8611 			cksumfields = 0;
   8612 		}
   8613 
   8614 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8615 
   8616 		/* Sync the DMA map. */
   8617 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8618 		    BUS_DMASYNC_PREWRITE);
   8619 
   8620 		/* Initialize the transmit descriptor. */
   8621 		for (nexttx = txq->txq_next, seg = 0;
   8622 		     seg < dmamap->dm_nsegs; seg++) {
   8623 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8624 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8625 			     seglen != 0;
   8626 			     curaddr += curlen, seglen -= curlen,
   8627 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8628 				curlen = seglen;
   8629 
   8630 				/*
   8631 				 * So says the Linux driver:
   8632 				 * Work around for premature descriptor
   8633 				 * write-backs in TSO mode.  Append a
   8634 				 * 4-byte sentinel descriptor.
   8635 				 */
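         				/*
         				 * Illustrative example: a final
         				 * 100-byte segment is emitted as a
         				 * 96-byte descriptor, and the
         				 * remaining 4 bytes become the
         				 * sentinel on the next iteration.
         				 */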
   8636 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8637 				    curlen > 8)
   8638 					curlen -= 4;
   8639 
   8640 				wm_set_dma_addr(
   8641 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8642 				txq->txq_descs[nexttx].wtx_cmdlen
   8643 				    = htole32(cksumcmd | curlen);
   8644 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8645 				    = 0;
   8646 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8647 				    = cksumfields;
    8648 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8649 				lasttx = nexttx;
   8650 
   8651 				DPRINTF(sc, WM_DEBUG_TX,
   8652 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8653 					"len %#04zx\n",
   8654 					device_xname(sc->sc_dev), nexttx,
   8655 					(uint64_t)curaddr, curlen));
   8656 			}
   8657 		}
   8658 
   8659 		KASSERT(lasttx != -1);
   8660 
   8661 		/*
   8662 		 * Set up the command byte on the last descriptor of
   8663 		 * the packet. If we're in the interrupt delay window,
   8664 		 * delay the interrupt.
   8665 		 */
   8666 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8667 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8668 
   8669 		/*
   8670 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8671 		 * up the descriptor to encapsulate the packet for us.
   8672 		 *
   8673 		 * This is only valid on the last descriptor of the packet.
   8674 		 */
   8675 		if (vlan_has_tag(m0)) {
   8676 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8677 			    htole32(WTX_CMD_VLE);
   8678 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8679 			    = htole16(vlan_get_tag(m0));
   8680 		}
   8681 
   8682 		txs->txs_lastdesc = lasttx;
   8683 
   8684 		DPRINTF(sc, WM_DEBUG_TX,
   8685 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8686 			device_xname(sc->sc_dev),
   8687 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8688 
   8689 		/* Sync the descriptors we're using. */
   8690 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8691 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8692 
   8693 		/* Give the packet to the chip. */
   8694 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8695 
   8696 		DPRINTF(sc, WM_DEBUG_TX,
   8697 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8698 
   8699 		DPRINTF(sc, WM_DEBUG_TX,
   8700 		    ("%s: TX: finished transmitting packet, job %d\n",
   8701 			device_xname(sc->sc_dev), txq->txq_snext));
   8702 
   8703 		/* Advance the tx pointer. */
   8704 		txq->txq_free -= txs->txs_ndesc;
   8705 		txq->txq_next = nexttx;
   8706 
   8707 		txq->txq_sfree--;
   8708 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8709 
   8710 		/* Pass the packet to any BPF listeners. */
   8711 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8712 	}
   8713 
   8714 	if (m0 != NULL) {
   8715 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8716 		WM_Q_EVCNT_INCR(txq, descdrop);
   8717 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8718 			__func__));
   8719 		m_freem(m0);
   8720 	}
   8721 
   8722 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8723 		/* No more slots; notify upper layer. */
   8724 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8725 	}
   8726 
   8727 	if (txq->txq_free != ofree) {
   8728 		/* Set a watchdog timer in case the chip flakes out. */
   8729 		txq->txq_lastsent = time_uptime;
   8730 		txq->txq_sending = true;
   8731 	}
   8732 }
   8733 
   8734 /*
   8735  * wm_nq_tx_offload:
   8736  *
   8737  *	Set up TCP/IP checksumming parameters for the
   8738  *	specified packet, for NEWQUEUE devices
   8739  */
   8740 static void
   8741 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8742     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8743 {
   8744 	struct mbuf *m0 = txs->txs_mbuf;
   8745 	uint32_t vl_len, mssidx, cmdc;
   8746 	struct ether_header *eh;
   8747 	int offset, iphl;
   8748 
   8749 	/*
   8750 	 * XXX It would be nice if the mbuf pkthdr had offset
   8751 	 * fields for the protocol headers.
   8752 	 */
   8753 	*cmdlenp = 0;
   8754 	*fieldsp = 0;
   8755 
   8756 	eh = mtod(m0, struct ether_header *);
   8757 	switch (htons(eh->ether_type)) {
   8758 	case ETHERTYPE_IP:
   8759 	case ETHERTYPE_IPV6:
   8760 		offset = ETHER_HDR_LEN;
   8761 		break;
   8762 
   8763 	case ETHERTYPE_VLAN:
   8764 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8765 		break;
   8766 
   8767 	default:
   8768 		/* Don't support this protocol or encapsulation. */
   8769 		*do_csum = false;
   8770 		return;
   8771 	}
   8772 	*do_csum = true;
   8773 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8774 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8775 
   8776 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8777 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8778 
   8779 	if ((m0->m_pkthdr.csum_flags &
   8780 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8781 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8782 	} else {
   8783 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8784 	}
   8785 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8786 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
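         	/*
         	 * Illustrative example (standard headers assumed): a plain
         	 * IPv4/TCP frame has offset = ETHER_HDR_LEN (14) and
         	 * iphl = 20, so vl_len now carries MACLEN = 14 and
         	 * IPLEN = 20 in their bit fields.
         	 */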
   8787 
   8788 	if (vlan_has_tag(m0)) {
   8789 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8790 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8791 		*cmdlenp |= NQTX_CMD_VLE;
   8792 	}
   8793 
   8794 	mssidx = 0;
   8795 
   8796 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8797 		int hlen = offset + iphl;
   8798 		int tcp_hlen;
   8799 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8800 
   8801 		if (__predict_false(m0->m_len <
   8802 				    (hlen + sizeof(struct tcphdr)))) {
   8803 			/*
   8804 			 * TCP/IP headers are not in the first mbuf; we need
   8805 			 * to do this the slow and painful way. Let's just
   8806 			 * hope this doesn't happen very often.
   8807 			 */
   8808 			struct tcphdr th;
   8809 
   8810 			WM_Q_EVCNT_INCR(txq, tsopain);
   8811 
   8812 			m_copydata(m0, hlen, sizeof(th), &th);
   8813 			if (v4) {
   8814 				struct ip ip;
   8815 
   8816 				m_copydata(m0, offset, sizeof(ip), &ip);
   8817 				ip.ip_len = 0;
   8818 				m_copyback(m0,
   8819 				    offset + offsetof(struct ip, ip_len),
   8820 				    sizeof(ip.ip_len), &ip.ip_len);
   8821 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8822 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8823 			} else {
   8824 				struct ip6_hdr ip6;
   8825 
   8826 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8827 				ip6.ip6_plen = 0;
   8828 				m_copyback(m0,
   8829 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8830 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8831 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8832 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8833 			}
   8834 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8835 			    sizeof(th.th_sum), &th.th_sum);
   8836 
   8837 			tcp_hlen = th.th_off << 2;
   8838 		} else {
   8839 			/*
   8840 			 * TCP/IP headers are in the first mbuf; we can do
   8841 			 * this the easy way.
   8842 			 */
   8843 			struct tcphdr *th;
   8844 
   8845 			if (v4) {
   8846 				struct ip *ip =
   8847 				    (void *)(mtod(m0, char *) + offset);
   8848 				th = (void *)(mtod(m0, char *) + hlen);
   8849 
   8850 				ip->ip_len = 0;
   8851 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8852 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8853 			} else {
   8854 				struct ip6_hdr *ip6 =
   8855 				    (void *)(mtod(m0, char *) + offset);
   8856 				th = (void *)(mtod(m0, char *) + hlen);
   8857 
   8858 				ip6->ip6_plen = 0;
   8859 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8860 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8861 			}
   8862 			tcp_hlen = th->th_off << 2;
   8863 		}
   8864 		hlen += tcp_hlen;
   8865 		*cmdlenp |= NQTX_CMD_TSE;
   8866 
   8867 		if (v4) {
   8868 			WM_Q_EVCNT_INCR(txq, tso);
   8869 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8870 		} else {
   8871 			WM_Q_EVCNT_INCR(txq, tso6);
   8872 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8873 		}
   8874 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8875 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8876 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8877 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8878 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8879 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
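         		/*
         		 * Illustrative example (values assumed): for TSO with
         		 * MSS = 1460 and a 20-byte TCP header, mssidx packs
         		 * MSS = 1460 and L4LEN = 20 into their bit fields.
         		 */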
   8880 	} else {
   8881 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8882 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8883 	}
   8884 
   8885 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8886 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8887 		cmdc |= NQTXC_CMD_IP4;
   8888 	}
   8889 
   8890 	if (m0->m_pkthdr.csum_flags &
   8891 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8892 		WM_Q_EVCNT_INCR(txq, tusum);
   8893 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8894 			cmdc |= NQTXC_CMD_TCP;
   8895 		else
   8896 			cmdc |= NQTXC_CMD_UDP;
   8897 
   8898 		cmdc |= NQTXC_CMD_IP4;
   8899 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8900 	}
   8901 	if (m0->m_pkthdr.csum_flags &
   8902 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8903 		WM_Q_EVCNT_INCR(txq, tusum6);
   8904 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8905 			cmdc |= NQTXC_CMD_TCP;
   8906 		else
   8907 			cmdc |= NQTXC_CMD_UDP;
   8908 
   8909 		cmdc |= NQTXC_CMD_IP6;
   8910 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8911 	}
   8912 
    8913 	/*
    8914 	 * We don't have to write a context descriptor for every packet
    8915 	 * to NEWQUEUE controllers, that is, 82575, 82576, 82580, I350,
    8916 	 * I354, I210 and I211. It is enough to write one per Tx queue
    8917 	 * for these controllers.
    8918 	 * Writing a context descriptor for every packet adds overhead,
    8919 	 * but it does not cause problems.
    8920 	 */
   8921 	/* Fill in the context descriptor. */
   8922 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8923 	    htole32(vl_len);
   8924 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8925 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8926 	    htole32(cmdc);
   8927 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8928 	    htole32(mssidx);
   8929 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8930 	DPRINTF(sc, WM_DEBUG_TX,
   8931 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8932 		txq->txq_next, 0, vl_len));
   8933 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8934 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8935 	txs->txs_ndesc++;
   8936 }
   8937 
   8938 /*
   8939  * wm_nq_start:		[ifnet interface function]
   8940  *
   8941  *	Start packet transmission on the interface for NEWQUEUE devices
   8942  */
   8943 static void
   8944 wm_nq_start(struct ifnet *ifp)
   8945 {
   8946 	struct wm_softc *sc = ifp->if_softc;
   8947 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8948 
   8949 	KASSERT(if_is_mpsafe(ifp));
   8950 	/*
   8951 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8952 	 */
   8953 
   8954 	mutex_enter(txq->txq_lock);
   8955 	if (!txq->txq_stopping)
   8956 		wm_nq_start_locked(ifp);
   8957 	mutex_exit(txq->txq_lock);
   8958 }
   8959 
   8960 static void
   8961 wm_nq_start_locked(struct ifnet *ifp)
   8962 {
   8963 	struct wm_softc *sc = ifp->if_softc;
   8964 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8965 
   8966 	wm_nq_send_common_locked(ifp, txq, false);
   8967 }
   8968 
   8969 static int
   8970 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8971 {
   8972 	int qid;
   8973 	struct wm_softc *sc = ifp->if_softc;
   8974 	struct wm_txqueue *txq;
   8975 
   8976 	qid = wm_select_txqueue(ifp, m);
   8977 	txq = &sc->sc_queue[qid].wmq_txq;
   8978 
   8979 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8980 		m_freem(m);
   8981 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8982 		return ENOBUFS;
   8983 	}
   8984 
   8985 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8986 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8987 	if (m->m_flags & M_MCAST)
   8988 		if_statinc_ref(nsr, if_omcasts);
   8989 	IF_STAT_PUTREF(ifp);
   8990 
    8991 	/*
    8992 	 * There are two situations in which this mutex_tryenter() can
    8993 	 * fail at run time:
    8994 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8995 	 *     (2) contention with the deferred if_start softint
    8996 	 *         (wm_handle_queue())
    8997 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8998 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8999 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    9000 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    9001 	 */
   9002 	if (mutex_tryenter(txq->txq_lock)) {
   9003 		if (!txq->txq_stopping)
   9004 			wm_nq_transmit_locked(ifp, txq);
   9005 		mutex_exit(txq->txq_lock);
   9006 	}
   9007 
   9008 	return 0;
   9009 }
   9010 
   9011 static void
   9012 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9013 {
   9014 
   9015 	wm_nq_send_common_locked(ifp, txq, true);
   9016 }
   9017 
   9018 static void
   9019 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9020     bool is_transmit)
   9021 {
   9022 	struct wm_softc *sc = ifp->if_softc;
   9023 	struct mbuf *m0;
   9024 	struct wm_txsoft *txs;
   9025 	bus_dmamap_t dmamap;
   9026 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9027 	bool do_csum, sent;
   9028 	bool remap = true;
   9029 
   9030 	KASSERT(mutex_owned(txq->txq_lock));
   9031 	KASSERT(!txq->txq_stopping);
   9032 
   9033 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9034 		return;
   9035 
   9036 	if (__predict_false(wm_linkdown_discard(txq))) {
   9037 		do {
   9038 			if (is_transmit)
   9039 				m0 = pcq_get(txq->txq_interq);
   9040 			else
   9041 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9042 			/*
    9043 			 * Increment the sent-packet counter as usual, even
    9044 			 * though the packet is discarded by the link-down PHY.
    9045 			 */
   9046 			if (m0 != NULL) {
   9047 				if_statinc(ifp, if_opackets);
   9048 				m_freem(m0);
   9049 			}
   9050 		} while (m0 != NULL);
   9051 		return;
   9052 	}
   9053 
   9054 	sent = false;
   9055 
   9056 	/*
   9057 	 * Loop through the send queue, setting up transmit descriptors
   9058 	 * until we drain the queue, or use up all available transmit
   9059 	 * descriptors.
   9060 	 */
   9061 	for (;;) {
   9062 		m0 = NULL;
   9063 
   9064 		/* Get a work queue entry. */
   9065 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9066 			wm_txeof(txq, UINT_MAX);
   9067 			if (txq->txq_sfree == 0) {
   9068 				DPRINTF(sc, WM_DEBUG_TX,
   9069 				    ("%s: TX: no free job descriptors\n",
   9070 					device_xname(sc->sc_dev)));
   9071 				WM_Q_EVCNT_INCR(txq, txsstall);
   9072 				break;
   9073 			}
   9074 		}
   9075 
   9076 		/* Grab a packet off the queue. */
   9077 		if (is_transmit)
   9078 			m0 = pcq_get(txq->txq_interq);
   9079 		else
   9080 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9081 		if (m0 == NULL)
   9082 			break;
   9083 
   9084 		DPRINTF(sc, WM_DEBUG_TX,
   9085 		    ("%s: TX: have packet to transmit: %p\n",
   9086 		    device_xname(sc->sc_dev), m0));
   9087 
   9088 		txs = &txq->txq_soft[txq->txq_snext];
   9089 		dmamap = txs->txs_dmamap;
   9090 
   9091 		/*
   9092 		 * Load the DMA map.  If this fails, the packet either
   9093 		 * didn't fit in the allotted number of segments, or we
   9094 		 * were short on resources.  For the too-many-segments
   9095 		 * case, we simply report an error and drop the packet,
   9096 		 * since we can't sanely copy a jumbo packet to a single
   9097 		 * buffer.
   9098 		 */
   9099 retry:
   9100 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9101 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9102 		if (__predict_false(error)) {
   9103 			if (error == EFBIG) {
   9104 				if (remap == true) {
   9105 					struct mbuf *m;
   9106 
   9107 					remap = false;
   9108 					m = m_defrag(m0, M_NOWAIT);
   9109 					if (m != NULL) {
   9110 						WM_Q_EVCNT_INCR(txq, defrag);
   9111 						m0 = m;
   9112 						goto retry;
   9113 					}
   9114 				}
   9115 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9116 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9117 				    "DMA segments, dropping...\n",
   9118 				    device_xname(sc->sc_dev));
   9119 				wm_dump_mbuf_chain(sc, m0);
   9120 				m_freem(m0);
   9121 				continue;
   9122 			}
   9123 			/* Short on resources, just stop for now. */
   9124 			DPRINTF(sc, WM_DEBUG_TX,
   9125 			    ("%s: TX: dmamap load failed: %d\n",
   9126 				device_xname(sc->sc_dev), error));
   9127 			break;
   9128 		}
   9129 
   9130 		segs_needed = dmamap->dm_nsegs;
   9131 
   9132 		/*
   9133 		 * Ensure we have enough descriptors free to describe
   9134 		 * the packet. Note, we always reserve one descriptor
   9135 		 * at the end of the ring due to the semantics of the
   9136 		 * TDT register, plus one more in the event we need
   9137 		 * to load offload context.
   9138 		 */
   9139 		if (segs_needed > txq->txq_free - 2) {
   9140 			/*
   9141 			 * Not enough free descriptors to transmit this
   9142 			 * packet.  We haven't committed anything yet,
   9143 			 * so just unload the DMA map, put the packet
    9144 			 * back on the queue, and punt. Notify the upper
   9145 			 * layer that there are no more slots left.
   9146 			 */
   9147 			DPRINTF(sc, WM_DEBUG_TX,
   9148 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9149 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9150 				segs_needed, txq->txq_free - 1));
   9151 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9152 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9153 			WM_Q_EVCNT_INCR(txq, txdstall);
   9154 			break;
   9155 		}
   9156 
   9157 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9158 
   9159 		DPRINTF(sc, WM_DEBUG_TX,
   9160 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9161 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9162 
   9163 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9164 
   9165 		/*
   9166 		 * Store a pointer to the packet so that we can free it
   9167 		 * later.
   9168 		 *
   9169 		 * Initially, we consider the number of descriptors the
    9170 		 * packet uses to be the number of DMA segments.  This may be
   9171 		 * incremented by 1 if we do checksum offload (a descriptor
   9172 		 * is used to set the checksum context).
   9173 		 */
   9174 		txs->txs_mbuf = m0;
   9175 		txs->txs_firstdesc = txq->txq_next;
   9176 		txs->txs_ndesc = segs_needed;
   9177 
   9178 		/* Set up offload parameters for this packet. */
   9179 		uint32_t cmdlen, fields, dcmdlen;
   9180 		if (m0->m_pkthdr.csum_flags &
   9181 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9182 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9183 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9184 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9185 			    &do_csum);
   9186 		} else {
   9187 			do_csum = false;
   9188 			cmdlen = 0;
   9189 			fields = 0;
   9190 		}
   9191 
   9192 		/* Sync the DMA map. */
   9193 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9194 		    BUS_DMASYNC_PREWRITE);
   9195 
   9196 		/* Initialize the first transmit descriptor. */
   9197 		nexttx = txq->txq_next;
   9198 		if (!do_csum) {
   9199 			/* Set up a legacy descriptor */
   9200 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9201 			    dmamap->dm_segs[0].ds_addr);
   9202 			txq->txq_descs[nexttx].wtx_cmdlen =
   9203 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9204 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9205 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9206 			if (vlan_has_tag(m0)) {
   9207 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9208 				    htole32(WTX_CMD_VLE);
   9209 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9210 				    htole16(vlan_get_tag(m0));
   9211 			} else
    9212 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9213 
   9214 			dcmdlen = 0;
   9215 		} else {
   9216 			/* Set up an advanced data descriptor */
   9217 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9218 			    htole64(dmamap->dm_segs[0].ds_addr);
   9219 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9220 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9221 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9222 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9223 			    htole32(fields);
   9224 			DPRINTF(sc, WM_DEBUG_TX,
   9225 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9226 				device_xname(sc->sc_dev), nexttx,
   9227 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9228 			DPRINTF(sc, WM_DEBUG_TX,
   9229 			    ("\t 0x%08x%08x\n", fields,
   9230 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9231 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9232 		}
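         		/*
         		 * dcmdlen carries the descriptor type bits that must be set
         		 * in every remaining segment descriptor filled in below; for
         		 * legacy descriptors no type bits are needed, hence it is 0.
         		 */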
   9233 
   9234 		lasttx = nexttx;
   9235 		nexttx = WM_NEXTTX(txq, nexttx);
   9236 		/*
   9237 		 * Fill in the next descriptors. Legacy or advanced format
   9238 		 * is the same here.
   9239 		 */
   9240 		for (seg = 1; seg < dmamap->dm_nsegs;
   9241 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9242 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9243 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9244 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9245 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9246 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9247 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9248 			lasttx = nexttx;
   9249 
   9250 			DPRINTF(sc, WM_DEBUG_TX,
   9251 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9252 				device_xname(sc->sc_dev), nexttx,
   9253 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9254 				dmamap->dm_segs[seg].ds_len));
   9255 		}
   9256 
   9257 		KASSERT(lasttx != -1);
   9258 
   9259 		/*
   9260 		 * Set up the command byte on the last descriptor of
   9261 		 * the packet. If we're in the interrupt delay window,
   9262 		 * delay the interrupt.
   9263 		 */
   9264 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9265 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
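         		/*
         		 * The KASSERT above guarantees that the legacy and advanced
         		 * EOP/RS bits coincide, so OR-ing them in through the legacy
         		 * view of the descriptor is correct for both formats.
         		 */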
   9266 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9267 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9268 
   9269 		txs->txs_lastdesc = lasttx;
   9270 
   9271 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9272 		    device_xname(sc->sc_dev),
   9273 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9274 
   9275 		/* Sync the descriptors we're using. */
   9276 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9277 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9278 
   9279 		/* Give the packet to the chip. */
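         		/*
         		 * Writing the new tail index to the TDT register hands
         		 * ownership of all filled descriptors up to (but not
         		 * including) nexttx to the hardware.
         		 */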
   9280 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9281 		sent = true;
   9282 
   9283 		DPRINTF(sc, WM_DEBUG_TX,
   9284 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9285 
   9286 		DPRINTF(sc, WM_DEBUG_TX,
   9287 		    ("%s: TX: finished transmitting packet, job %d\n",
   9288 			device_xname(sc->sc_dev), txq->txq_snext));
   9289 
   9290 		/* Advance the tx pointer. */
   9291 		txq->txq_free -= txs->txs_ndesc;
   9292 		txq->txq_next = nexttx;
   9293 
   9294 		txq->txq_sfree--;
   9295 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9296 
   9297 		/* Pass the packet to any BPF listeners. */
   9298 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9299 	}
   9300 
   9301 	if (m0 != NULL) {
   9302 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9303 		WM_Q_EVCNT_INCR(txq, descdrop);
   9304 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9305 			__func__));
   9306 		m_freem(m0);
   9307 	}
   9308 
   9309 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9310 		/* No more slots; notify upper layer. */
   9311 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9312 	}
   9313 
   9314 	if (sent) {
   9315 		/* Set a watchdog timer in case the chip flakes out. */
   9316 		txq->txq_lastsent = time_uptime;
   9317 		txq->txq_sending = true;
   9318 	}
   9319 }
   9320 
   9321 static void
   9322 wm_deferred_start_locked(struct wm_txqueue *txq)
   9323 {
   9324 	struct wm_softc *sc = txq->txq_sc;
   9325 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9326 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9327 	int qid = wmq->wmq_id;
   9328 
   9329 	KASSERT(mutex_owned(txq->txq_lock));
   9330 	KASSERT(!txq->txq_stopping);
   9331 
   9332 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9333 		/* XXX Needed for ALTQ or single-CPU systems */
   9334 		if (qid == 0)
   9335 			wm_nq_start_locked(ifp);
   9336 		wm_nq_transmit_locked(ifp, txq);
   9337 	} else {
    9338 		/* XXX Needed for ALTQ or single-CPU systems */
   9339 		if (qid == 0)
   9340 			wm_start_locked(ifp);
   9341 		wm_transmit_locked(ifp, txq);
   9342 	}
   9343 }
   9344 
   9345 /* Interrupt */
   9346 
   9347 /*
   9348  * wm_txeof:
   9349  *
   9350  *	Helper; handle transmit interrupts.
   9351  */
   9352 static bool
   9353 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9354 {
   9355 	struct wm_softc *sc = txq->txq_sc;
   9356 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9357 	struct wm_txsoft *txs;
   9358 	int count = 0;
   9359 	int i;
   9360 	uint8_t status;
   9361 	bool more = false;
   9362 
   9363 	KASSERT(mutex_owned(txq->txq_lock));
   9364 
   9365 	if (txq->txq_stopping)
   9366 		return false;
   9367 
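         	/*
         	 * Clear the no-space flag; the transmit path will set it again
         	 * if the ring is still too full after we reclaim entries below.
         	 */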
   9368 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9369 
   9370 	/*
   9371 	 * Go through the Tx list and free mbufs for those
   9372 	 * frames which have been transmitted.
   9373 	 */
   9374 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9375 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9376 		txs = &txq->txq_soft[i];
   9377 
   9378 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9379 			device_xname(sc->sc_dev), i));
   9380 
   9381 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9382 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9383 
   9384 		status =
   9385 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9386 		if ((status & WTX_ST_DD) == 0) {
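         			/*
         			 * This job is not done yet. Completions occur in
         			 * order, so stop scanning here; re-sync the
         			 * descriptor so it can be re-checked later.
         			 */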
   9387 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9388 			    BUS_DMASYNC_PREREAD);
   9389 			break;
   9390 		}
   9391 
   9392 		if (limit-- == 0) {
   9393 			more = true;
   9394 			DPRINTF(sc, WM_DEBUG_TX,
   9395 			    ("%s: TX: loop limited, job %d is not processed\n",
   9396 				device_xname(sc->sc_dev), i));
   9397 			break;
   9398 		}
   9399 
   9400 		count++;
   9401 		DPRINTF(sc, WM_DEBUG_TX,
   9402 		    ("%s: TX: job %d done: descs %d..%d\n",
   9403 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9404 		    txs->txs_lastdesc));
   9405 
   9406 		/*
   9407 		 * XXX We should probably be using the statistics
   9408 		 * XXX registers, but I don't know if they exist
   9409 		 * XXX on chips before the i82544.
   9410 		 */
   9411 
   9412 #ifdef WM_EVENT_COUNTERS
   9413 		if (status & WTX_ST_TU)
   9414 			WM_Q_EVCNT_INCR(txq, underrun);
   9415 #endif /* WM_EVENT_COUNTERS */
   9416 
   9417 		/*
    9418 		 * The documentation for the 82574 and newer says the status
    9419 		 * field has neither an EC (Excessive Collision) bit nor an
    9420 		 * LC (Late Collision) bit (they are reserved). See the "PCIe
    9421 		 * GbE Controller Open Source Software Developer's Manual",
    9422 		 * and the 82574 and newer datasheets.
    9423 		 *
    9424 		 * XXX The LC bit has been seen set on an I218 even at full
    9425 		 * duplex, so it may have some other, undocumented meaning.
   9426 		 */
   9427 
   9428 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9429 		    && ((sc->sc_type < WM_T_82574)
   9430 			|| (sc->sc_type == WM_T_80003))) {
   9431 			if_statinc(ifp, if_oerrors);
   9432 			if (status & WTX_ST_LC)
   9433 				log(LOG_WARNING, "%s: late collision\n",
   9434 				    device_xname(sc->sc_dev));
   9435 			else if (status & WTX_ST_EC) {
   9436 				if_statadd(ifp, if_collisions,
   9437 				    TX_COLLISION_THRESHOLD + 1);
   9438 				log(LOG_WARNING, "%s: excessive collisions\n",
   9439 				    device_xname(sc->sc_dev));
   9440 			}
   9441 		} else
   9442 			if_statinc(ifp, if_opackets);
   9443 
   9444 		txq->txq_packets++;
   9445 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9446 
   9447 		txq->txq_free += txs->txs_ndesc;
   9448 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9449 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9450 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9451 		m_freem(txs->txs_mbuf);
   9452 		txs->txs_mbuf = NULL;
   9453 	}
   9454 
   9455 	/* Update the dirty transmit buffer pointer. */
   9456 	txq->txq_sdirty = i;
   9457 	DPRINTF(sc, WM_DEBUG_TX,
   9458 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9459 
   9460 	if (count != 0)
   9461 		rnd_add_uint32(&sc->rnd_source, count);
   9462 
   9463 	/*
   9464 	 * If there are no more pending transmissions, cancel the watchdog
   9465 	 * timer.
   9466 	 */
   9467 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9468 		txq->txq_sending = false;
   9469 
   9470 	return more;
   9471 }
   9472 
   9473 static inline uint32_t
   9474 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9475 {
   9476 	struct wm_softc *sc = rxq->rxq_sc;
   9477 
   9478 	if (sc->sc_type == WM_T_82574)
   9479 		return EXTRXC_STATUS(
   9480 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9481 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9482 		return NQRXC_STATUS(
   9483 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9484 	else
   9485 		return rxq->rxq_descs[idx].wrx_status;
   9486 }
   9487 
   9488 static inline uint32_t
   9489 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9490 {
   9491 	struct wm_softc *sc = rxq->rxq_sc;
   9492 
   9493 	if (sc->sc_type == WM_T_82574)
   9494 		return EXTRXC_ERROR(
   9495 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9496 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9497 		return NQRXC_ERROR(
   9498 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9499 	else
   9500 		return rxq->rxq_descs[idx].wrx_errors;
   9501 }
   9502 
   9503 static inline uint16_t
   9504 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9505 {
   9506 	struct wm_softc *sc = rxq->rxq_sc;
   9507 
   9508 	if (sc->sc_type == WM_T_82574)
   9509 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9510 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9511 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9512 	else
   9513 		return rxq->rxq_descs[idx].wrx_special;
   9514 }
   9515 
   9516 static inline int
   9517 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9518 {
   9519 	struct wm_softc *sc = rxq->rxq_sc;
   9520 
   9521 	if (sc->sc_type == WM_T_82574)
   9522 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9523 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9524 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9525 	else
   9526 		return rxq->rxq_descs[idx].wrx_len;
   9527 }
   9528 
   9529 #ifdef WM_DEBUG
   9530 static inline uint32_t
   9531 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9532 {
   9533 	struct wm_softc *sc = rxq->rxq_sc;
   9534 
   9535 	if (sc->sc_type == WM_T_82574)
   9536 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9537 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9538 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9539 	else
   9540 		return 0;
   9541 }
   9542 
   9543 static inline uint8_t
   9544 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9545 {
   9546 	struct wm_softc *sc = rxq->rxq_sc;
   9547 
   9548 	if (sc->sc_type == WM_T_82574)
   9549 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9550 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9551 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9552 	else
   9553 		return 0;
   9554 }
   9555 #endif /* WM_DEBUG */
   9556 
   9557 static inline bool
   9558 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9559     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9560 {
   9561 
   9562 	if (sc->sc_type == WM_T_82574)
   9563 		return (status & ext_bit) != 0;
   9564 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9565 		return (status & nq_bit) != 0;
   9566 	else
   9567 		return (status & legacy_bit) != 0;
   9568 }
   9569 
   9570 static inline bool
   9571 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9572     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9573 {
   9574 
   9575 	if (sc->sc_type == WM_T_82574)
   9576 		return (error & ext_bit) != 0;
   9577 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9578 		return (error & nq_bit) != 0;
   9579 	else
   9580 		return (error & legacy_bit) != 0;
   9581 }
   9582 
   9583 static inline bool
   9584 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9585 {
   9586 
   9587 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9588 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9589 		return true;
   9590 	else
   9591 		return false;
   9592 }
   9593 
   9594 static inline bool
   9595 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9596 {
   9597 	struct wm_softc *sc = rxq->rxq_sc;
   9598 
   9599 	/* XXX missing error bit for newqueue? */
   9600 	if (wm_rxdesc_is_set_error(sc, errors,
   9601 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9602 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9603 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9604 		NQRXC_ERROR_RXE)) {
   9605 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9606 		    EXTRXC_ERROR_SE, 0))
   9607 			log(LOG_WARNING, "%s: symbol error\n",
   9608 			    device_xname(sc->sc_dev));
   9609 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9610 		    EXTRXC_ERROR_SEQ, 0))
   9611 			log(LOG_WARNING, "%s: receive sequence error\n",
   9612 			    device_xname(sc->sc_dev));
   9613 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9614 		    EXTRXC_ERROR_CE, 0))
   9615 			log(LOG_WARNING, "%s: CRC error\n",
   9616 			    device_xname(sc->sc_dev));
   9617 		return true;
   9618 	}
   9619 
   9620 	return false;
   9621 }
   9622 
   9623 static inline bool
   9624 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9625 {
   9626 	struct wm_softc *sc = rxq->rxq_sc;
   9627 
   9628 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9629 		NQRXC_STATUS_DD)) {
   9630 		/* We have processed all of the receive descriptors. */
   9631 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9632 		return false;
   9633 	}
   9634 
   9635 	return true;
   9636 }
   9637 
   9638 static inline bool
   9639 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9640     uint16_t vlantag, struct mbuf *m)
   9641 {
   9642 
   9643 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9644 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9645 		vlan_set_tag(m, le16toh(vlantag));
   9646 	}
   9647 
   9648 	return true;
   9649 }
   9650 
   9651 static inline void
   9652 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9653     uint32_t errors, struct mbuf *m)
   9654 {
   9655 	struct wm_softc *sc = rxq->rxq_sc;
   9656 
   9657 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9658 		if (wm_rxdesc_is_set_status(sc, status,
   9659 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9660 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9661 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9662 			if (wm_rxdesc_is_set_error(sc, errors,
   9663 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9664 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9665 		}
   9666 		if (wm_rxdesc_is_set_status(sc, status,
   9667 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9668 			/*
   9669 			 * Note: we don't know if this was TCP or UDP,
   9670 			 * so we just set both bits, and expect the
   9671 			 * upper layers to deal.
   9672 			 */
   9673 			WM_Q_EVCNT_INCR(rxq, tusum);
   9674 			m->m_pkthdr.csum_flags |=
   9675 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9676 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9677 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9678 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9679 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9680 		}
   9681 	}
   9682 }
   9683 
   9684 /*
   9685  * wm_rxeof:
   9686  *
   9687  *	Helper; handle receive interrupts.
   9688  */
   9689 static bool
   9690 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9691 {
   9692 	struct wm_softc *sc = rxq->rxq_sc;
   9693 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9694 	struct wm_rxsoft *rxs;
   9695 	struct mbuf *m;
   9696 	int i, len;
   9697 	int count = 0;
   9698 	uint32_t status, errors;
   9699 	uint16_t vlantag;
   9700 	bool more = false;
   9701 
   9702 	KASSERT(mutex_owned(rxq->rxq_lock));
   9703 
   9704 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9705 		rxs = &rxq->rxq_soft[i];
   9706 
   9707 		DPRINTF(sc, WM_DEBUG_RX,
   9708 		    ("%s: RX: checking descriptor %d\n",
   9709 			device_xname(sc->sc_dev), i));
   9710 		wm_cdrxsync(rxq, i,
   9711 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9712 
   9713 		status = wm_rxdesc_get_status(rxq, i);
   9714 		errors = wm_rxdesc_get_errors(rxq, i);
   9715 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9716 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9717 #ifdef WM_DEBUG
   9718 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9719 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9720 #endif
   9721 
   9722 		if (!wm_rxdesc_dd(rxq, i, status))
   9723 			break;
   9724 
   9725 		if (limit-- == 0) {
   9726 			more = true;
   9727 			DPRINTF(sc, WM_DEBUG_RX,
   9728 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9729 				device_xname(sc->sc_dev), i));
   9730 			break;
   9731 		}
   9732 
   9733 		count++;
   9734 		if (__predict_false(rxq->rxq_discard)) {
   9735 			DPRINTF(sc, WM_DEBUG_RX,
   9736 			    ("%s: RX: discarding contents of descriptor %d\n",
   9737 				device_xname(sc->sc_dev), i));
   9738 			wm_init_rxdesc(rxq, i);
   9739 			if (wm_rxdesc_is_eop(rxq, status)) {
   9740 				/* Reset our state. */
   9741 				DPRINTF(sc, WM_DEBUG_RX,
   9742 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9743 					device_xname(sc->sc_dev)));
   9744 				rxq->rxq_discard = 0;
   9745 			}
   9746 			continue;
   9747 		}
   9748 
   9749 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9750 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9751 
   9752 		m = rxs->rxs_mbuf;
   9753 
   9754 		/*
   9755 		 * Add a new receive buffer to the ring, unless of
   9756 		 * course the length is zero. Treat the latter as a
   9757 		 * failed mapping.
   9758 		 */
   9759 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9760 			/*
   9761 			 * Failed, throw away what we've done so
   9762 			 * far, and discard the rest of the packet.
   9763 			 */
   9764 			if_statinc(ifp, if_ierrors);
   9765 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9766 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9767 			wm_init_rxdesc(rxq, i);
   9768 			if (!wm_rxdesc_is_eop(rxq, status))
   9769 				rxq->rxq_discard = 1;
   9770 			if (rxq->rxq_head != NULL)
   9771 				m_freem(rxq->rxq_head);
   9772 			WM_RXCHAIN_RESET(rxq);
   9773 			DPRINTF(sc, WM_DEBUG_RX,
   9774 			    ("%s: RX: Rx buffer allocation failed, "
   9775 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9776 				rxq->rxq_discard ? " (discard)" : ""));
   9777 			continue;
   9778 		}
   9779 
   9780 		m->m_len = len;
   9781 		rxq->rxq_len += len;
   9782 		DPRINTF(sc, WM_DEBUG_RX,
   9783 		    ("%s: RX: buffer at %p len %d\n",
   9784 			device_xname(sc->sc_dev), m->m_data, len));
   9785 
   9786 		/* If this is not the end of the packet, keep looking. */
   9787 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9788 			WM_RXCHAIN_LINK(rxq, m);
   9789 			DPRINTF(sc, WM_DEBUG_RX,
   9790 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9791 				device_xname(sc->sc_dev), rxq->rxq_len));
   9792 			continue;
   9793 		}
   9794 
   9795 		/*
    9796 		 * Okay, we have the entire packet now. The chip is configured
    9797 		 * to include the FCS (not all chips can be configured to
    9798 		 * strip it), so we normally need to trim it. The exceptions
    9799 		 * are I35[04] and I21[01]: those chips have an errata whereby
    9800 		 * the RCTL_SECRC bit in the RCTL register is always set, so
    9801 		 * we must not trim the FCS there. PCH2 and newer chips also
    9802 		 * omit the FCS when jumbo frames are in use, to work around
    9803 		 * an errata. We may need to adjust the length of the previous
    9804 		 * mbuf in the chain if the current mbuf is too short.
   9805 		 */
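         		/*
         		 * For example, if the final mbuf of a chain holds only one
         		 * byte, the 4-byte FCS spans two mbufs: we zero this mbuf
         		 * and trim the remaining three bytes from the previous one.
         		 */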
   9806 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9807 			if (m->m_len < ETHER_CRC_LEN) {
   9808 				rxq->rxq_tail->m_len
   9809 				    -= (ETHER_CRC_LEN - m->m_len);
   9810 				m->m_len = 0;
   9811 			} else
   9812 				m->m_len -= ETHER_CRC_LEN;
   9813 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9814 		} else
   9815 			len = rxq->rxq_len;
   9816 
   9817 		WM_RXCHAIN_LINK(rxq, m);
   9818 
   9819 		*rxq->rxq_tailp = NULL;
   9820 		m = rxq->rxq_head;
   9821 
   9822 		WM_RXCHAIN_RESET(rxq);
   9823 
   9824 		DPRINTF(sc, WM_DEBUG_RX,
   9825 		    ("%s: RX: have entire packet, len -> %d\n",
   9826 			device_xname(sc->sc_dev), len));
   9827 
   9828 		/* If an error occurred, update stats and drop the packet. */
   9829 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9830 			m_freem(m);
   9831 			continue;
   9832 		}
   9833 
   9834 		/* No errors.  Receive the packet. */
   9835 		m_set_rcvif(m, ifp);
   9836 		m->m_pkthdr.len = len;
   9837 		/*
   9838 		 * TODO
    9839 		 * We should save the rsshash and rsstype in this mbuf.
   9840 		 */
   9841 		DPRINTF(sc, WM_DEBUG_RX,
   9842 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9843 			device_xname(sc->sc_dev), rsstype, rsshash));
   9844 
   9845 		/*
   9846 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9847 		 * for us.  Associate the tag with the packet.
   9848 		 */
   9849 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9850 			continue;
   9851 
   9852 		/* Set up checksum info for this packet. */
   9853 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9854 
   9855 		rxq->rxq_packets++;
   9856 		rxq->rxq_bytes += len;
   9857 		/* Pass it on. */
   9858 		if_percpuq_enqueue(sc->sc_ipq, m);
   9859 
   9860 		if (rxq->rxq_stopping)
   9861 			break;
   9862 	}
   9863 	rxq->rxq_ptr = i;
   9864 
   9865 	if (count != 0)
   9866 		rnd_add_uint32(&sc->rnd_source, count);
   9867 
   9868 	DPRINTF(sc, WM_DEBUG_RX,
   9869 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9870 
   9871 	return more;
   9872 }
   9873 
   9874 /*
   9875  * wm_linkintr_gmii:
   9876  *
   9877  *	Helper; handle link interrupts for GMII.
   9878  */
   9879 static void
   9880 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9881 {
   9882 	device_t dev = sc->sc_dev;
   9883 	uint32_t status, reg;
   9884 	bool link;
   9885 	int rv;
   9886 
   9887 	KASSERT(mutex_owned(sc->sc_core_lock));
   9888 
   9889 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9890 		__func__));
   9891 
   9892 	if ((icr & ICR_LSC) == 0) {
   9893 		if (icr & ICR_RXSEQ)
   9894 			DPRINTF(sc, WM_DEBUG_LINK,
    9895 			    ("%s: LINK: Receive sequence error\n",
   9896 				device_xname(dev)));
   9897 		return;
   9898 	}
   9899 
   9900 	/* Link status changed */
   9901 	status = CSR_READ(sc, WMREG_STATUS);
   9902 	link = status & STATUS_LU;
   9903 	if (link) {
   9904 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9905 			device_xname(dev),
   9906 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9907 		if (wm_phy_need_linkdown_discard(sc)) {
   9908 			DPRINTF(sc, WM_DEBUG_LINK,
   9909 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9910 				device_xname(dev)));
   9911 			wm_clear_linkdown_discard(sc);
   9912 		}
   9913 	} else {
   9914 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9915 			device_xname(dev)));
   9916 		if (wm_phy_need_linkdown_discard(sc)) {
   9917 			DPRINTF(sc, WM_DEBUG_LINK,
   9918 			    ("%s: linkintr: Set linkdown discard flag\n",
   9919 				device_xname(dev)));
   9920 			wm_set_linkdown_discard(sc);
   9921 		}
   9922 	}
   9923 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9924 		wm_gig_downshift_workaround_ich8lan(sc);
   9925 
   9926 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9927 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9928 
   9929 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9930 		device_xname(dev)));
   9931 	mii_pollstat(&sc->sc_mii);
   9932 	if (sc->sc_type == WM_T_82543) {
   9933 		int miistatus, active;
   9934 
   9935 		/*
   9936 		 * With 82543, we need to force speed and
   9937 		 * duplex on the MAC equal to what the PHY
   9938 		 * speed and duplex configuration is.
   9939 		 */
   9940 		miistatus = sc->sc_mii.mii_media_status;
   9941 
   9942 		if (miistatus & IFM_ACTIVE) {
   9943 			active = sc->sc_mii.mii_media_active;
   9944 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9945 			switch (IFM_SUBTYPE(active)) {
   9946 			case IFM_10_T:
   9947 				sc->sc_ctrl |= CTRL_SPEED_10;
   9948 				break;
   9949 			case IFM_100_TX:
   9950 				sc->sc_ctrl |= CTRL_SPEED_100;
   9951 				break;
   9952 			case IFM_1000_T:
   9953 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9954 				break;
   9955 			default:
   9956 				/*
   9957 				 * Fiber?
    9958 				 * Should not enter here.
   9959 				 */
   9960 				device_printf(dev, "unknown media (%x)\n",
   9961 				    active);
   9962 				break;
   9963 			}
   9964 			if (active & IFM_FDX)
   9965 				sc->sc_ctrl |= CTRL_FD;
   9966 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9967 		}
   9968 	} else if (sc->sc_type == WM_T_PCH) {
   9969 		wm_k1_gig_workaround_hv(sc,
   9970 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9971 	}
   9972 
   9973 	/*
   9974 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9975 	 * aggressive resulting in many collisions. To avoid this, increase
   9976 	 * the IPG and reduce Rx latency in the PHY.
   9977 	 */
   9978 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9979 	    && link) {
   9980 		uint32_t tipg_reg;
   9981 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9982 		bool fdx;
   9983 		uint16_t emi_addr, emi_val;
   9984 
   9985 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9986 		tipg_reg &= ~TIPG_IPGT_MASK;
   9987 		fdx = status & STATUS_FD;
   9988 
   9989 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9990 			tipg_reg |= 0xff;
   9991 			/* Reduce Rx latency in analog PHY */
   9992 			emi_val = 0;
   9993 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9994 		    fdx && speed != STATUS_SPEED_1000) {
   9995 			tipg_reg |= 0xc;
   9996 			emi_val = 1;
   9997 		} else {
   9998 			/* Roll back the default values */
   9999 			tipg_reg |= 0x08;
   10000 			emi_val = 1;
   10001 		}
   10002 
   10003 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10004 
   10005 		rv = sc->phy.acquire(sc);
   10006 		if (rv)
   10007 			return;
   10008 
   10009 		if (sc->sc_type == WM_T_PCH2)
   10010 			emi_addr = I82579_RX_CONFIG;
   10011 		else
   10012 			emi_addr = I217_RX_CONFIG;
   10013 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10014 
   10015 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10016 			uint16_t phy_reg;
   10017 
   10018 			sc->phy.readreg_locked(dev, 2,
   10019 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10020 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10021 			if (speed == STATUS_SPEED_100
   10022 			    || speed == STATUS_SPEED_10)
   10023 				phy_reg |= 0x3e8;
   10024 			else
   10025 				phy_reg |= 0xfa;
   10026 			sc->phy.writereg_locked(dev, 2,
   10027 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10028 
   10029 			if (speed == STATUS_SPEED_1000) {
   10030 				sc->phy.readreg_locked(dev, 2,
   10031 				    HV_PM_CTRL, &phy_reg);
   10032 
   10033 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10034 
   10035 				sc->phy.writereg_locked(dev, 2,
   10036 				    HV_PM_CTRL, phy_reg);
   10037 			}
   10038 		}
   10039 		sc->phy.release(sc);
   10040 
   10041 		if (rv)
   10042 			return;
   10043 
   10044 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10045 			uint16_t data, ptr_gap;
   10046 
   10047 			if (speed == STATUS_SPEED_1000) {
   10048 				rv = sc->phy.acquire(sc);
   10049 				if (rv)
   10050 					return;
   10051 
   10052 				rv = sc->phy.readreg_locked(dev, 2,
   10053 				    I82579_UNKNOWN1, &data);
   10054 				if (rv) {
   10055 					sc->phy.release(sc);
   10056 					return;
   10057 				}
   10058 
   10059 				ptr_gap = (data & (0x3ff << 2)) >> 2;
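          				/*
          				 * The gap value lives in bits 11:2 of
          				 * the register; a minimum of 0x18 is
          				 * enforced below.
          				 */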
   10060 				if (ptr_gap < 0x18) {
   10061 					data &= ~(0x3ff << 2);
   10062 					data |= (0x18 << 2);
   10063 					rv = sc->phy.writereg_locked(dev,
   10064 					    2, I82579_UNKNOWN1, data);
   10065 				}
   10066 				sc->phy.release(sc);
   10067 				if (rv)
   10068 					return;
   10069 			} else {
   10070 				rv = sc->phy.acquire(sc);
   10071 				if (rv)
   10072 					return;
   10073 
   10074 				rv = sc->phy.writereg_locked(dev, 2,
   10075 				    I82579_UNKNOWN1, 0xc023);
   10076 				sc->phy.release(sc);
   10077 				if (rv)
   10078 					return;
   10079 
   10080 			}
   10081 		}
   10082 	}
   10083 
   10084 	/*
    10085 	 * I217 packet loss issue:
    10086 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    10087 	 * on power up by setting the Beacon Duration for I217 to
    10088 	 * 8 usec.
   10089 	 */
   10090 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10091 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10092 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10093 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10094 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10095 	}
   10096 
   10097 	/* Work-around I218 hang issue */
   10098 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10099 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10100 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10101 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10102 		wm_k1_workaround_lpt_lp(sc, link);
   10103 
   10104 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10105 		/*
   10106 		 * Set platform power management values for Latency
   10107 		 * Tolerance Reporting (LTR)
   10108 		 */
   10109 		wm_platform_pm_pch_lpt(sc,
   10110 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10111 	}
   10112 
   10113 	/* Clear link partner's EEE ability */
   10114 	sc->eee_lp_ability = 0;
   10115 
   10116 	/* FEXTNVM6 K1-off workaround */
   10117 	if (sc->sc_type == WM_T_PCH_SPT) {
   10118 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10119 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10120 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10121 		else
   10122 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10123 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10124 	}
   10125 
   10126 	if (!link)
   10127 		return;
   10128 
   10129 	switch (sc->sc_type) {
   10130 	case WM_T_PCH2:
   10131 		wm_k1_workaround_lv(sc);
   10132 		/* FALLTHROUGH */
   10133 	case WM_T_PCH:
   10134 		if (sc->sc_phytype == WMPHY_82578)
   10135 			wm_link_stall_workaround_hv(sc);
   10136 		break;
   10137 	default:
   10138 		break;
   10139 	}
   10140 
   10141 	/* Enable/Disable EEE after link up */
   10142 	if (sc->sc_phytype > WMPHY_82579)
   10143 		wm_set_eee_pchlan(sc);
   10144 }
   10145 
   10146 /*
   10147  * wm_linkintr_tbi:
   10148  *
   10149  *	Helper; handle link interrupts for TBI mode.
   10150  */
   10151 static void
   10152 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10153 {
   10154 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10155 	uint32_t status;
   10156 
   10157 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10158 		__func__));
   10159 
   10160 	status = CSR_READ(sc, WMREG_STATUS);
   10161 	if (icr & ICR_LSC) {
   10162 		wm_check_for_link(sc);
   10163 		if (status & STATUS_LU) {
   10164 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10165 				device_xname(sc->sc_dev),
   10166 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10167 			/*
   10168 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10169 			 * so we should update sc->sc_ctrl
   10170 			 */
   10171 
   10172 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10173 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10174 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10175 			if (status & STATUS_FD)
   10176 				sc->sc_tctl |=
   10177 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10178 			else
   10179 				sc->sc_tctl |=
   10180 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10181 			if (sc->sc_ctrl & CTRL_TFCE)
   10182 				sc->sc_fcrtl |= FCRTL_XONE;
   10183 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10184 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10185 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10186 			sc->sc_tbi_linkup = 1;
   10187 			if_link_state_change(ifp, LINK_STATE_UP);
   10188 		} else {
   10189 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10190 				device_xname(sc->sc_dev)));
   10191 			sc->sc_tbi_linkup = 0;
   10192 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10193 		}
   10194 		/* Update LED */
   10195 		wm_tbi_serdes_set_linkled(sc);
   10196 	} else if (icr & ICR_RXSEQ)
   10197 		DPRINTF(sc, WM_DEBUG_LINK,
   10198 		    ("%s: LINK: Receive sequence error\n",
   10199 			device_xname(sc->sc_dev)));
   10200 }
   10201 
   10202 /*
   10203  * wm_linkintr_serdes:
   10204  *
    10205  *	Helper; handle link interrupts for SERDES mode.
   10206  */
   10207 static void
   10208 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10209 {
   10210 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10211 	struct mii_data *mii = &sc->sc_mii;
   10212 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10213 	uint32_t pcs_adv, pcs_lpab, reg;
   10214 
   10215 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10216 		__func__));
   10217 
   10218 	if (icr & ICR_LSC) {
   10219 		/* Check PCS */
   10220 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10221 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10222 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10223 				device_xname(sc->sc_dev)));
   10224 			mii->mii_media_status |= IFM_ACTIVE;
   10225 			sc->sc_tbi_linkup = 1;
   10226 			if_link_state_change(ifp, LINK_STATE_UP);
   10227 		} else {
   10228 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10229 				device_xname(sc->sc_dev)));
   10230 			mii->mii_media_status |= IFM_NONE;
   10231 			sc->sc_tbi_linkup = 0;
   10232 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10233 			wm_tbi_serdes_set_linkled(sc);
   10234 			return;
   10235 		}
   10236 		mii->mii_media_active |= IFM_1000_SX;
   10237 		if ((reg & PCS_LSTS_FDX) != 0)
   10238 			mii->mii_media_active |= IFM_FDX;
   10239 		else
   10240 			mii->mii_media_active |= IFM_HDX;
   10241 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10242 			/* Check flow */
   10243 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10244 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10245 				DPRINTF(sc, WM_DEBUG_LINK,
   10246 				    ("XXX LINKOK but not ACOMP\n"));
   10247 				return;
   10248 			}
   10249 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10250 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10251 			DPRINTF(sc, WM_DEBUG_LINK,
   10252 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10253 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10254 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10255 				mii->mii_media_active |= IFM_FLOW
   10256 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10257 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10258 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10259 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10260 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10261 				mii->mii_media_active |= IFM_FLOW
   10262 				    | IFM_ETH_TXPAUSE;
   10263 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10264 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10265 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10266 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10267 				mii->mii_media_active |= IFM_FLOW
   10268 				    | IFM_ETH_RXPAUSE;
   10269 		}
   10270 		/* Update LED */
   10271 		wm_tbi_serdes_set_linkled(sc);
   10272 	} else
   10273 		DPRINTF(sc, WM_DEBUG_LINK,
   10274 		    ("%s: LINK: Receive sequence error\n",
   10275 		    device_xname(sc->sc_dev)));
   10276 }
   10277 
   10278 /*
   10279  * wm_linkintr:
   10280  *
   10281  *	Helper; handle link interrupts.
   10282  */
   10283 static void
   10284 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10285 {
   10286 
   10287 	KASSERT(mutex_owned(sc->sc_core_lock));
   10288 
   10289 	if (sc->sc_flags & WM_F_HAS_MII)
   10290 		wm_linkintr_gmii(sc, icr);
   10291 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10292 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10293 		wm_linkintr_serdes(sc, icr);
   10294 	else
   10295 		wm_linkintr_tbi(sc, icr);
   10296 }
   10297 
   10298 
   10299 static inline void
   10300 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10301 {
   10302 
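          	/*
          	 * Defer further queue processing either to the per-queue
          	 * workqueue (thread context) or to a softint, depending on the
          	 * current configuration.
          	 */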
   10303 	if (wmq->wmq_txrx_use_workqueue)
   10304 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10305 	else
   10306 		softint_schedule(wmq->wmq_si);
   10307 }
   10308 
   10309 static inline void
   10310 wm_legacy_intr_disable(struct wm_softc *sc)
   10311 {
   10312 
   10313 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10314 }
   10315 
   10316 static inline void
   10317 wm_legacy_intr_enable(struct wm_softc *sc)
   10318 {
   10319 
   10320 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10321 }
   10322 
   10323 /*
   10324  * wm_intr_legacy:
   10325  *
   10326  *	Interrupt service routine for INTx and MSI.
   10327  */
   10328 static int
   10329 wm_intr_legacy(void *arg)
   10330 {
   10331 	struct wm_softc *sc = arg;
   10332 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10333 	struct wm_queue *wmq = &sc->sc_queue[0];
   10334 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10335 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10336 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10337 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10338 	uint32_t icr, rndval = 0;
   10339 	bool more = false;
   10340 
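          	/* Reading ICR acknowledges and clears the asserted cause bits. */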
   10341 	icr = CSR_READ(sc, WMREG_ICR);
   10342 	if ((icr & sc->sc_icr) == 0)
   10343 		return 0;
   10344 
   10345 	DPRINTF(sc, WM_DEBUG_TX,
    10346 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10347 	if (rndval == 0)
   10348 		rndval = icr;
   10349 
   10350 	mutex_enter(txq->txq_lock);
   10351 
   10352 	if (txq->txq_stopping) {
   10353 		mutex_exit(txq->txq_lock);
   10354 		return 1;
   10355 	}
   10356 
   10357 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10358 	if (icr & ICR_TXDW) {
   10359 		DPRINTF(sc, WM_DEBUG_TX,
   10360 		    ("%s: TX: got TXDW interrupt\n",
   10361 			device_xname(sc->sc_dev)));
   10362 		WM_Q_EVCNT_INCR(txq, txdw);
   10363 	}
   10364 #endif
   10365 	if (txlimit > 0) {
   10366 		more |= wm_txeof(txq, txlimit);
   10367 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10368 			more = true;
   10369 	} else
   10370 		more = true;
   10371 	mutex_exit(txq->txq_lock);
   10372 
   10373 	mutex_enter(rxq->rxq_lock);
   10374 
   10375 	if (rxq->rxq_stopping) {
   10376 		mutex_exit(rxq->rxq_lock);
   10377 		return 1;
   10378 	}
   10379 
   10380 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10381 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10382 		DPRINTF(sc, WM_DEBUG_RX,
   10383 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10384 			device_xname(sc->sc_dev),
   10385 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10386 		WM_Q_EVCNT_INCR(rxq, intr);
   10387 	}
   10388 #endif
   10389 	if (rxlimit > 0) {
   10390 		/*
    10391 		 * wm_rxeof() does *not* call upper layer functions directly,
    10392 		 * since if_percpuq_enqueue() just calls softint_schedule().
    10393 		 * So we can call wm_rxeof() in interrupt context.
   10394 		 */
   10395 		more = wm_rxeof(rxq, rxlimit);
   10396 	} else
   10397 		more = true;
   10398 
   10399 	mutex_exit(rxq->rxq_lock);
   10400 
   10401 	mutex_enter(sc->sc_core_lock);
   10402 
   10403 	if (sc->sc_core_stopping) {
   10404 		mutex_exit(sc->sc_core_lock);
   10405 		return 1;
   10406 	}
   10407 
   10408 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10409 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10410 		wm_linkintr(sc, icr);
   10411 	}
   10412 	if ((icr & ICR_GPI(0)) != 0)
   10413 		device_printf(sc->sc_dev, "got module interrupt\n");
   10414 
   10415 	mutex_exit(sc->sc_core_lock);
   10416 
   10417 	if (icr & ICR_RXO) {
   10418 #if defined(WM_DEBUG)
   10419 		log(LOG_WARNING, "%s: Receive overrun\n",
   10420 		    device_xname(sc->sc_dev));
   10421 #endif /* defined(WM_DEBUG) */
   10422 	}
   10423 
   10424 	rnd_add_uint32(&sc->rnd_source, rndval);
   10425 
   10426 	if (more) {
   10427 		/* Try to get more packets going. */
   10428 		wm_legacy_intr_disable(sc);
   10429 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10430 		wm_sched_handle_queue(sc, wmq);
   10431 	}
   10432 
   10433 	return 1;
   10434 }
   10435 
   10436 static inline void
   10437 wm_txrxintr_disable(struct wm_queue *wmq)
   10438 {
   10439 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10440 
   10441 	if (__predict_false(!wm_is_using_msix(sc))) {
   10442 		wm_legacy_intr_disable(sc);
   10443 		return;
   10444 	}
   10445 
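          	/*
          	 * The mask layout differs by generation: 82574 masks per-queue
          	 * causes in IMC, 82575 uses the EITR queue bits in EIMC, and
          	 * newer chips mask one EIMC bit per MSI-X vector.
          	 */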
   10446 	if (sc->sc_type == WM_T_82574)
   10447 		CSR_WRITE(sc, WMREG_IMC,
   10448 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10449 	else if (sc->sc_type == WM_T_82575)
   10450 		CSR_WRITE(sc, WMREG_EIMC,
   10451 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10452 	else
   10453 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10454 }
   10455 
   10456 static inline void
   10457 wm_txrxintr_enable(struct wm_queue *wmq)
   10458 {
   10459 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10460 
   10461 	wm_itrs_calculate(sc, wmq);
   10462 
   10463 	if (__predict_false(!wm_is_using_msix(sc))) {
   10464 		wm_legacy_intr_enable(sc);
   10465 		return;
   10466 	}
   10467 
   10468 	/*
    10469 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10470 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    10471 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    10472 	 * disabled while each wm_handle_queue(wmq) is running.
   10473 	 */
   10474 	if (sc->sc_type == WM_T_82574)
   10475 		CSR_WRITE(sc, WMREG_IMS,
   10476 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10477 	else if (sc->sc_type == WM_T_82575)
   10478 		CSR_WRITE(sc, WMREG_EIMS,
   10479 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10480 	else
   10481 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10482 }
   10483 
   10484 static int
   10485 wm_txrxintr_msix(void *arg)
   10486 {
   10487 	struct wm_queue *wmq = arg;
   10488 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10489 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10490 	struct wm_softc *sc = txq->txq_sc;
   10491 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10492 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10493 	bool txmore;
   10494 	bool rxmore;
   10495 
   10496 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10497 
   10498 	DPRINTF(sc, WM_DEBUG_TX,
   10499 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10500 
   10501 	wm_txrxintr_disable(wmq);
   10502 
   10503 	mutex_enter(txq->txq_lock);
   10504 
   10505 	if (txq->txq_stopping) {
   10506 		mutex_exit(txq->txq_lock);
   10507 		return 1;
   10508 	}
   10509 
   10510 	WM_Q_EVCNT_INCR(txq, txdw);
   10511 	if (txlimit > 0) {
   10512 		txmore = wm_txeof(txq, txlimit);
    10513 		/* wm_deferred_start() is done in wm_handle_queue(). */
   10514 	} else
   10515 		txmore = true;
   10516 	mutex_exit(txq->txq_lock);
   10517 
   10518 	DPRINTF(sc, WM_DEBUG_RX,
   10519 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10520 	mutex_enter(rxq->rxq_lock);
   10521 
   10522 	if (rxq->rxq_stopping) {
   10523 		mutex_exit(rxq->rxq_lock);
   10524 		return 1;
   10525 	}
   10526 
   10527 	WM_Q_EVCNT_INCR(rxq, intr);
   10528 	if (rxlimit > 0) {
   10529 		rxmore = wm_rxeof(rxq, rxlimit);
   10530 	} else
   10531 		rxmore = true;
   10532 	mutex_exit(rxq->rxq_lock);
   10533 
   10534 	wm_itrs_writereg(sc, wmq);
   10535 
   10536 	if (txmore || rxmore) {
   10537 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10538 		wm_sched_handle_queue(sc, wmq);
   10539 	} else
   10540 		wm_txrxintr_enable(wmq);
   10541 
   10542 	return 1;
   10543 }
   10544 
   10545 static void
   10546 wm_handle_queue(void *arg)
   10547 {
   10548 	struct wm_queue *wmq = arg;
   10549 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10550 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10551 	struct wm_softc *sc = txq->txq_sc;
   10552 	u_int txlimit = sc->sc_tx_process_limit;
   10553 	u_int rxlimit = sc->sc_rx_process_limit;
   10554 	bool txmore;
   10555 	bool rxmore;
   10556 
   10557 	mutex_enter(txq->txq_lock);
   10558 	if (txq->txq_stopping) {
   10559 		mutex_exit(txq->txq_lock);
   10560 		return;
   10561 	}
   10562 	txmore = wm_txeof(txq, txlimit);
   10563 	wm_deferred_start_locked(txq);
   10564 	mutex_exit(txq->txq_lock);
   10565 
   10566 	mutex_enter(rxq->rxq_lock);
   10567 	if (rxq->rxq_stopping) {
   10568 		mutex_exit(rxq->rxq_lock);
   10569 		return;
   10570 	}
   10571 	WM_Q_EVCNT_INCR(rxq, defer);
   10572 	rxmore = wm_rxeof(rxq, rxlimit);
   10573 	mutex_exit(rxq->rxq_lock);
   10574 
   10575 	if (txmore || rxmore) {
   10576 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10577 		wm_sched_handle_queue(sc, wmq);
   10578 	} else
   10579 		wm_txrxintr_enable(wmq);
   10580 }
   10581 
   10582 static void
   10583 wm_handle_queue_work(struct work *wk, void *context)
   10584 {
   10585 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10586 
   10587 	/*
    10588 	 * An "enqueued" flag is not required here.
   10589 	 */
   10590 	wm_handle_queue(wmq);
   10591 }
   10592 
   10593 /*
   10594  * wm_linkintr_msix:
   10595  *
   10596  *	Interrupt service routine for link status change for MSI-X.
   10597  */
   10598 static int
   10599 wm_linkintr_msix(void *arg)
   10600 {
   10601 	struct wm_softc *sc = arg;
   10602 	uint32_t reg;
   10603 	bool has_rxo;
   10604 
   10605 	reg = CSR_READ(sc, WMREG_ICR);
   10606 	mutex_enter(sc->sc_core_lock);
   10607 	DPRINTF(sc, WM_DEBUG_LINK,
   10608 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10609 		device_xname(sc->sc_dev), reg));
   10610 
   10611 	if (sc->sc_core_stopping)
   10612 		goto out;
   10613 
   10614 	if ((reg & ICR_LSC) != 0) {
   10615 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10616 		wm_linkintr(sc, ICR_LSC);
   10617 	}
   10618 	if ((reg & ICR_GPI(0)) != 0)
   10619 		device_printf(sc->sc_dev, "got module interrupt\n");
   10620 
   10621 	/*
   10622 	 * XXX 82574 MSI-X mode workaround
   10623 	 *
    10624 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    10625 	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
    10626 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10627 	 * interrupts by writing WMREG_ICS to process receive packets.
   10628 	 */
   10629 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10630 #if defined(WM_DEBUG)
   10631 		log(LOG_WARNING, "%s: Receive overrun\n",
   10632 		    device_xname(sc->sc_dev));
   10633 #endif /* defined(WM_DEBUG) */
   10634 
   10635 		has_rxo = true;
   10636 		/*
    10637 		 * The RXO interrupt arrives at a very high rate when receive
    10638 		 * traffic is heavy. We use polling mode for ICR_OTHER, as we
    10639 		 * do for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
    10640 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    10641 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10642 		 */
   10643 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10644 
   10645 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10646 	}
   10647 
   10650 out:
   10651 	mutex_exit(sc->sc_core_lock);
   10652 
   10653 	if (sc->sc_type == WM_T_82574) {
   10654 		if (!has_rxo)
   10655 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10656 		else
   10657 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10658 	} else if (sc->sc_type == WM_T_82575)
   10659 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10660 	else
   10661 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10662 
   10663 	return 1;
   10664 }
   10665 
   10666 /*
   10667  * Media related.
   10668  * GMII, SGMII, TBI (and SERDES)
   10669  */
   10670 
   10671 /* Common */
   10672 
   10673 /*
   10674  * wm_tbi_serdes_set_linkled:
   10675  *
   10676  *	Update the link LED on TBI and SERDES devices.
   10677  */
   10678 static void
   10679 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10680 {
   10681 
   10682 	if (sc->sc_tbi_linkup)
   10683 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10684 	else
   10685 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10686 
   10687 	/* 82540 or newer devices are active low */
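          	/* The XOR below inverts the pin state set above for them. */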
   10688 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10689 
   10690 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10691 }
   10692 
   10693 /* GMII related */
   10694 
   10695 /*
   10696  * wm_gmii_reset:
   10697  *
   10698  *	Reset the PHY.
   10699  */
   10700 static void
   10701 wm_gmii_reset(struct wm_softc *sc)
   10702 {
   10703 	uint32_t reg;
   10704 	int rv;
   10705 
   10706 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10707 		device_xname(sc->sc_dev), __func__));
   10708 
   10709 	rv = sc->phy.acquire(sc);
   10710 	if (rv != 0) {
   10711 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10712 		    __func__);
   10713 		return;
   10714 	}
   10715 
   10716 	switch (sc->sc_type) {
   10717 	case WM_T_82542_2_0:
   10718 	case WM_T_82542_2_1:
   10719 		/* null */
   10720 		break;
   10721 	case WM_T_82543:
   10722 		/*
   10723 		 * With 82543, we need to force speed and duplex on the MAC
   10724 		 * equal to what the PHY speed and duplex configuration is.
   10725 		 * In addition, we need to perform a hardware reset on the PHY
   10726 		 * to take it out of reset.
   10727 		 */
   10728 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10729 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10730 
   10731 		/* The PHY reset pin is active-low. */
   10732 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10733 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10734 		    CTRL_EXT_SWDPIN(4));
   10735 		reg |= CTRL_EXT_SWDPIO(4);
   10736 
   10737 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10738 		CSR_WRITE_FLUSH(sc);
   10739 		delay(10*1000);
   10740 
   10741 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10742 		CSR_WRITE_FLUSH(sc);
   10743 		delay(150);
   10744 #if 0
   10745 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10746 #endif
   10747 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10748 		break;
   10749 	case WM_T_82544:	/* Reset 10000us */
   10750 	case WM_T_82540:
   10751 	case WM_T_82545:
   10752 	case WM_T_82545_3:
   10753 	case WM_T_82546:
   10754 	case WM_T_82546_3:
   10755 	case WM_T_82541:
   10756 	case WM_T_82541_2:
   10757 	case WM_T_82547:
   10758 	case WM_T_82547_2:
   10759 	case WM_T_82571:	/* Reset 100us */
   10760 	case WM_T_82572:
   10761 	case WM_T_82573:
   10762 	case WM_T_82574:
   10763 	case WM_T_82575:
   10764 	case WM_T_82576:
   10765 	case WM_T_82580:
   10766 	case WM_T_I350:
   10767 	case WM_T_I354:
   10768 	case WM_T_I210:
   10769 	case WM_T_I211:
   10770 	case WM_T_82583:
   10771 	case WM_T_80003:
   10772 		/* Generic reset */
   10773 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10774 		CSR_WRITE_FLUSH(sc);
   10775 		delay(20000);
   10776 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10777 		CSR_WRITE_FLUSH(sc);
   10778 		delay(20000);
   10779 
   10780 		if ((sc->sc_type == WM_T_82541)
   10781 		    || (sc->sc_type == WM_T_82541_2)
   10782 		    || (sc->sc_type == WM_T_82547)
   10783 		    || (sc->sc_type == WM_T_82547_2)) {
    10784 			/* Workarounds for IGP are done in igp_reset() */
   10785 			/* XXX add code to set LED after phy reset */
   10786 		}
   10787 		break;
   10788 	case WM_T_ICH8:
   10789 	case WM_T_ICH9:
   10790 	case WM_T_ICH10:
   10791 	case WM_T_PCH:
   10792 	case WM_T_PCH2:
   10793 	case WM_T_PCH_LPT:
   10794 	case WM_T_PCH_SPT:
   10795 	case WM_T_PCH_CNP:
   10796 		/* Generic reset */
   10797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10798 		CSR_WRITE_FLUSH(sc);
   10799 		delay(100);
   10800 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10801 		CSR_WRITE_FLUSH(sc);
   10802 		delay(150);
   10803 		break;
   10804 	default:
   10805 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10806 		    __func__);
   10807 		break;
   10808 	}
   10809 
   10810 	sc->phy.release(sc);
   10811 
   10812 	/* get_cfg_done */
   10813 	wm_get_cfg_done(sc);
   10814 
   10815 	/* Extra setup */
   10816 	switch (sc->sc_type) {
   10817 	case WM_T_82542_2_0:
   10818 	case WM_T_82542_2_1:
   10819 	case WM_T_82543:
   10820 	case WM_T_82544:
   10821 	case WM_T_82540:
   10822 	case WM_T_82545:
   10823 	case WM_T_82545_3:
   10824 	case WM_T_82546:
   10825 	case WM_T_82546_3:
   10826 	case WM_T_82541_2:
   10827 	case WM_T_82547_2:
   10828 	case WM_T_82571:
   10829 	case WM_T_82572:
   10830 	case WM_T_82573:
   10831 	case WM_T_82574:
   10832 	case WM_T_82583:
   10833 	case WM_T_82575:
   10834 	case WM_T_82576:
   10835 	case WM_T_82580:
   10836 	case WM_T_I350:
   10837 	case WM_T_I354:
   10838 	case WM_T_I210:
   10839 	case WM_T_I211:
   10840 	case WM_T_80003:
   10841 		/* Null */
   10842 		break;
   10843 	case WM_T_82541:
   10844 	case WM_T_82547:
    10845 		/* XXX Actively configure the LED after PHY reset */
   10846 		break;
   10847 	case WM_T_ICH8:
   10848 	case WM_T_ICH9:
   10849 	case WM_T_ICH10:
   10850 	case WM_T_PCH:
   10851 	case WM_T_PCH2:
   10852 	case WM_T_PCH_LPT:
   10853 	case WM_T_PCH_SPT:
   10854 	case WM_T_PCH_CNP:
   10855 		wm_phy_post_reset(sc);
   10856 		break;
   10857 	default:
   10858 		panic("%s: unknown type\n", __func__);
   10859 		break;
   10860 	}
   10861 }
   10862 
   10863 /*
   10864  * Set up sc_phytype and mii_{read|write}reg.
   10865  *
    10866  *  To identify the PHY type, the correct read/write functions must be
    10867  * selected. To select them, the PCI ID or MAC type is required, without
    10868  * accessing any PHY registers.
    10869  *
    10870  *  On the first call of this function, the PHY ID is not yet known, so
    10871  * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
    10872  * so the result might be incorrect.
    10873  *
    10874  *  On the second call, the PHY OUI and model are used to identify the
    10875  * PHY type. This might still not be perfect because of missing entries
    10876  * in the comparison, but it is better than the first call.
    10877  *
    10878  *  If the newly detected result differs from the previous assumption,
    10879  * a diagnostic message is printed.
   10880  */
   10881 static void
   10882 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10883     uint16_t phy_model)
   10884 {
   10885 	device_t dev = sc->sc_dev;
   10886 	struct mii_data *mii = &sc->sc_mii;
   10887 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10888 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10889 	mii_readreg_t new_readreg;
   10890 	mii_writereg_t new_writereg;
   10891 	bool dodiag = true;
   10892 
   10893 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10894 		device_xname(sc->sc_dev), __func__));
   10895 
   10896 	/*
	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
	 * incorrect, so don't print diagnostic output on the second call.
   10899 	 */
   10900 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10901 		dodiag = false;
   10902 
   10903 	if (mii->mii_readreg == NULL) {
   10904 		/*
   10905 		 *  This is the first call of this function. For ICH and PCH
   10906 		 * variants, it's difficult to determine the PHY access method
   10907 		 * by sc_type, so use the PCI product ID for some devices.
   10908 		 */
   10909 
   10910 		switch (sc->sc_pcidevid) {
   10911 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10912 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10913 			/* 82577 */
   10914 			new_phytype = WMPHY_82577;
   10915 			break;
   10916 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10917 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10918 			/* 82578 */
   10919 			new_phytype = WMPHY_82578;
   10920 			break;
   10921 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10922 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10923 			/* 82579 */
   10924 			new_phytype = WMPHY_82579;
   10925 			break;
   10926 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10927 		case PCI_PRODUCT_INTEL_82801I_BM:
   10928 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10929 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10930 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10931 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10932 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10933 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10934 			/* ICH8, 9, 10 with 82567 */
   10935 			new_phytype = WMPHY_BM;
   10936 			break;
   10937 		default:
   10938 			break;
   10939 		}
   10940 	} else {
   10941 		/* It's not the first call. Use PHY OUI and model */
   10942 		switch (phy_oui) {
   10943 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10944 			switch (phy_model) {
   10945 			case MII_MODEL_ATTANSIC_AR8021:
   10946 				new_phytype = WMPHY_82578;
   10947 				break;
   10948 			default:
   10949 				break;
   10950 			}
   10951 			break;
   10952 		case MII_OUI_xxMARVELL:
   10953 			switch (phy_model) {
   10954 			case MII_MODEL_xxMARVELL_I210:
   10955 				new_phytype = WMPHY_I210;
   10956 				break;
   10957 			case MII_MODEL_xxMARVELL_E1011:
   10958 			case MII_MODEL_xxMARVELL_E1000_3:
   10959 			case MII_MODEL_xxMARVELL_E1000_5:
   10960 			case MII_MODEL_xxMARVELL_E1112:
   10961 				new_phytype = WMPHY_M88;
   10962 				break;
   10963 			case MII_MODEL_xxMARVELL_E1149:
   10964 				new_phytype = WMPHY_BM;
   10965 				break;
   10966 			case MII_MODEL_xxMARVELL_E1111:
   10967 			case MII_MODEL_xxMARVELL_I347:
   10968 			case MII_MODEL_xxMARVELL_E1512:
   10969 			case MII_MODEL_xxMARVELL_E1340M:
   10970 			case MII_MODEL_xxMARVELL_E1543:
   10971 				new_phytype = WMPHY_M88;
   10972 				break;
   10973 			case MII_MODEL_xxMARVELL_I82563:
   10974 				new_phytype = WMPHY_GG82563;
   10975 				break;
   10976 			default:
   10977 				break;
   10978 			}
   10979 			break;
   10980 		case MII_OUI_INTEL:
   10981 			switch (phy_model) {
   10982 			case MII_MODEL_INTEL_I82577:
   10983 				new_phytype = WMPHY_82577;
   10984 				break;
   10985 			case MII_MODEL_INTEL_I82579:
   10986 				new_phytype = WMPHY_82579;
   10987 				break;
   10988 			case MII_MODEL_INTEL_I217:
   10989 				new_phytype = WMPHY_I217;
   10990 				break;
   10991 			case MII_MODEL_INTEL_I82580:
   10992 				new_phytype = WMPHY_82580;
   10993 				break;
   10994 			case MII_MODEL_INTEL_I350:
   10995 				new_phytype = WMPHY_I350;
   10996 				break;
   10997 			default:
   10998 				break;
   10999 			}
   11000 			break;
   11001 		case MII_OUI_yyINTEL:
   11002 			switch (phy_model) {
   11003 			case MII_MODEL_yyINTEL_I82562G:
   11004 			case MII_MODEL_yyINTEL_I82562EM:
   11005 			case MII_MODEL_yyINTEL_I82562ET:
   11006 				new_phytype = WMPHY_IFE;
   11007 				break;
   11008 			case MII_MODEL_yyINTEL_IGP01E1000:
   11009 				new_phytype = WMPHY_IGP;
   11010 				break;
   11011 			case MII_MODEL_yyINTEL_I82566:
   11012 				new_phytype = WMPHY_IGP_3;
   11013 				break;
   11014 			default:
   11015 				break;
   11016 			}
   11017 			break;
   11018 		default:
   11019 			break;
   11020 		}
   11021 
   11022 		if (dodiag) {
   11023 			if (new_phytype == WMPHY_UNKNOWN)
   11024 				aprint_verbose_dev(dev,
   11025 				    "%s: Unknown PHY model. OUI=%06x, "
   11026 				    "model=%04x\n", __func__, phy_oui,
   11027 				    phy_model);
   11028 
   11029 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11030 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type(%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
   11034 			}
   11035 		}
   11036 	}
   11037 
   11038 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11039 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11040 		/* SGMII */
   11041 		new_readreg = wm_sgmii_readreg;
   11042 		new_writereg = wm_sgmii_writereg;
   11043 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11044 		/* BM2 (phyaddr == 1) */
   11045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11046 		    && (new_phytype != WMPHY_BM)
   11047 		    && (new_phytype != WMPHY_UNKNOWN))
   11048 			doubt_phytype = new_phytype;
   11049 		new_phytype = WMPHY_BM;
   11050 		new_readreg = wm_gmii_bm_readreg;
   11051 		new_writereg = wm_gmii_bm_writereg;
   11052 	} else if (sc->sc_type >= WM_T_PCH) {
   11053 		/* All PCH* use _hv_ */
   11054 		new_readreg = wm_gmii_hv_readreg;
   11055 		new_writereg = wm_gmii_hv_writereg;
   11056 	} else if (sc->sc_type >= WM_T_ICH8) {
   11057 		/* non-82567 ICH8, 9 and 10 */
   11058 		new_readreg = wm_gmii_i82544_readreg;
   11059 		new_writereg = wm_gmii_i82544_writereg;
   11060 	} else if (sc->sc_type >= WM_T_80003) {
   11061 		/* 80003 */
   11062 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11063 		    && (new_phytype != WMPHY_GG82563)
   11064 		    && (new_phytype != WMPHY_UNKNOWN))
   11065 			doubt_phytype = new_phytype;
   11066 		new_phytype = WMPHY_GG82563;
   11067 		new_readreg = wm_gmii_i80003_readreg;
   11068 		new_writereg = wm_gmii_i80003_writereg;
   11069 	} else if (sc->sc_type >= WM_T_I210) {
   11070 		/* I210 and I211 */
   11071 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11072 		    && (new_phytype != WMPHY_I210)
   11073 		    && (new_phytype != WMPHY_UNKNOWN))
   11074 			doubt_phytype = new_phytype;
   11075 		new_phytype = WMPHY_I210;
   11076 		new_readreg = wm_gmii_gs40g_readreg;
   11077 		new_writereg = wm_gmii_gs40g_writereg;
   11078 	} else if (sc->sc_type >= WM_T_82580) {
   11079 		/* 82580, I350 and I354 */
   11080 		new_readreg = wm_gmii_82580_readreg;
   11081 		new_writereg = wm_gmii_82580_writereg;
   11082 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11084 		new_readreg = wm_gmii_i82544_readreg;
   11085 		new_writereg = wm_gmii_i82544_writereg;
   11086 	} else {
   11087 		new_readreg = wm_gmii_i82543_readreg;
   11088 		new_writereg = wm_gmii_i82543_writereg;
   11089 	}
   11090 
   11091 	if (new_phytype == WMPHY_BM) {
   11092 		/* All BM use _bm_ */
   11093 		new_readreg = wm_gmii_bm_readreg;
   11094 		new_writereg = wm_gmii_bm_writereg;
   11095 	}
   11096 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11097 		/* All PCH* use _hv_ */
   11098 		new_readreg = wm_gmii_hv_readreg;
   11099 		new_writereg = wm_gmii_hv_writereg;
   11100 	}
   11101 
   11102 	/* Diag output */
   11103 	if (dodiag) {
   11104 		if (doubt_phytype != WMPHY_UNKNOWN)
   11105 			aprint_error_dev(dev, "Assumed new PHY type was "
   11106 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11107 			    new_phytype);
   11108 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11109 		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY "
			    "type(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
   11113 
   11114 		if ((mii->mii_readreg != NULL) &&
   11115 		    (new_phytype == WMPHY_UNKNOWN))
   11116 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11117 
   11118 		if ((mii->mii_readreg != NULL) &&
   11119 		    (mii->mii_readreg != new_readreg))
   11120 			aprint_error_dev(dev, "Previously assumed PHY "
   11121 			    "read/write function was incorrect.\n");
   11122 	}
   11123 
   11124 	/* Update now */
   11125 	sc->sc_phytype = new_phytype;
   11126 	mii->mii_readreg = new_readreg;
   11127 	mii->mii_writereg = new_writereg;
   11128 	if (new_readreg == wm_gmii_hv_readreg) {
   11129 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11130 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11131 	} else if (new_readreg == wm_sgmii_readreg) {
   11132 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11133 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11134 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11135 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11136 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11137 	}
   11138 }
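
/*
 * Illustrative sketch of the two-call protocol described above (kept out
 * of the build; the function name is ours, not part of the driver).  It
 * mirrors what wm_gmii_mediainit() below actually does, with the error
 * handling and retry paths elided.
 */
#if 0
static void
wm_phytype_example(struct wm_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;

	/* 1st call: no PHY ID yet, so guess from the PCI ID or MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* Probe the bus with the read/write functions selected above. */
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	/* 2nd call: refine the result with the real PHY OUI and model. */
	child = LIST_FIRST(&mii->mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
}
#endif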
   11139 
   11140 /*
   11141  * wm_get_phy_id_82575:
   11142  *
 * Return the PHY ID, or -1 on failure.
   11144  */
   11145 static int
   11146 wm_get_phy_id_82575(struct wm_softc *sc)
   11147 {
   11148 	uint32_t reg;
   11149 	int phyid = -1;
   11150 
   11151 	/* XXX */
   11152 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11153 		return -1;
   11154 
   11155 	if (wm_sgmii_uses_mdio(sc)) {
   11156 		switch (sc->sc_type) {
   11157 		case WM_T_82575:
   11158 		case WM_T_82576:
   11159 			reg = CSR_READ(sc, WMREG_MDIC);
   11160 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11161 			break;
   11162 		case WM_T_82580:
   11163 		case WM_T_I350:
   11164 		case WM_T_I354:
   11165 		case WM_T_I210:
   11166 		case WM_T_I211:
   11167 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11168 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11169 			break;
   11170 		default:
   11171 			return -1;
   11172 		}
   11173 	}
   11174 
   11175 	return phyid;
   11176 }
   11177 
   11178 /*
   11179  * wm_gmii_mediainit:
   11180  *
   11181  *	Initialize media for use on 1000BASE-T devices.
   11182  */
   11183 static void
   11184 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11185 {
   11186 	device_t dev = sc->sc_dev;
   11187 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11188 	struct mii_data *mii = &sc->sc_mii;
   11189 
   11190 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11191 		device_xname(sc->sc_dev), __func__));
   11192 
   11193 	/* We have GMII. */
   11194 	sc->sc_flags |= WM_F_HAS_MII;
   11195 
   11196 	if (sc->sc_type == WM_T_80003)
   11197 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11198 	else
   11199 		sc->sc_tipg = TIPG_1000T_DFLT;
   11200 
   11201 	/*
   11202 	 * Let the chip set speed/duplex on its own based on
   11203 	 * signals from the PHY.
   11204 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11205 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11206 	 */
   11207 	sc->sc_ctrl |= CTRL_SLU;
   11208 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11209 
   11210 	/* Initialize our media structures and probe the GMII. */
   11211 	mii->mii_ifp = ifp;
   11212 
   11213 	mii->mii_statchg = wm_gmii_statchg;
   11214 
   11215 	/* get PHY control from SMBus to PCIe */
   11216 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11217 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11218 	    || (sc->sc_type == WM_T_PCH_CNP))
   11219 		wm_init_phy_workarounds_pchlan(sc);
   11220 
   11221 	wm_gmii_reset(sc);
   11222 
   11223 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11224 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11225 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11226 
   11227 	/* Setup internal SGMII PHY for SFP */
   11228 	wm_sgmii_sfp_preconfig(sc);
   11229 
   11230 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11231 	    || (sc->sc_type == WM_T_82580)
   11232 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11233 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11234 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11235 			/* Attach only one port */
   11236 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11237 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11238 		} else {
   11239 			int i, id;
   11240 			uint32_t ctrl_ext;
   11241 
   11242 			id = wm_get_phy_id_82575(sc);
   11243 			if (id != -1) {
   11244 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11245 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11246 			}
   11247 			if ((id == -1)
   11248 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11249 				/* Power on sgmii phy if it is disabled */
   11250 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11251 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11252 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11253 				CSR_WRITE_FLUSH(sc);
   11254 				delay(300*1000); /* XXX too long */
   11255 
   11256 				/*
				 * Scan PHY addresses 1 through 7.
				 *
				 * I2C access fails with the I2C register's
				 * ERROR bit set, so suppress error messages
				 * while scanning.
   11262 				 */
   11263 				sc->phy.no_errprint = true;
   11264 				for (i = 1; i < 8; i++)
   11265 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11266 					    0xffffffff, i, MII_OFFSET_ANY,
   11267 					    MIIF_DOPAUSE);
   11268 				sc->phy.no_errprint = false;
   11269 
   11270 				/* Restore previous sfp cage power state */
   11271 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11272 			}
   11273 		}
   11274 	} else
   11275 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11276 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11277 
   11278 	/*
   11279 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11280 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11281 	 */
   11282 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11283 		|| (sc->sc_type == WM_T_PCH_SPT)
   11284 		|| (sc->sc_type == WM_T_PCH_CNP))
   11285 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11286 		wm_set_mdio_slow_mode_hv(sc);
   11287 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11288 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11289 	}
   11290 
   11291 	/*
   11292 	 * (For ICH8 variants)
   11293 	 * If PHY detection failed, use BM's r/w function and retry.
   11294 	 */
   11295 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* If that failed, retry with the *_bm_* functions */
   11297 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11298 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11299 		    sc->sc_phytype);
   11300 		sc->sc_phytype = WMPHY_BM;
   11301 		mii->mii_readreg = wm_gmii_bm_readreg;
   11302 		mii->mii_writereg = wm_gmii_bm_writereg;
   11303 
   11304 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11305 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11306 	}
   11307 
   11308 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   11310 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11311 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11312 		sc->sc_phytype = WMPHY_NONE;
   11313 	} else {
   11314 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11315 
   11316 		/*
		 * A PHY was found. Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
   11319 		 */
   11320 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11321 		    child->mii_mpd_model);
   11322 
   11323 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11324 	}
   11325 }
   11326 
   11327 /*
   11328  * wm_gmii_mediachange:	[ifmedia interface function]
   11329  *
   11330  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11331  */
   11332 static int
   11333 wm_gmii_mediachange(struct ifnet *ifp)
   11334 {
   11335 	struct wm_softc *sc = ifp->if_softc;
   11336 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11337 	uint32_t reg;
   11338 	int rc;
   11339 
   11340 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11341 		device_xname(sc->sc_dev), __func__));
   11342 
   11343 	KASSERT(mutex_owned(sc->sc_core_lock));
   11344 
   11345 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11346 		return 0;
   11347 
   11348 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11349 	if ((sc->sc_type == WM_T_82580)
   11350 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11351 	    || (sc->sc_type == WM_T_I211)) {
   11352 		reg = CSR_READ(sc, WMREG_PHPM);
   11353 		reg &= ~PHPM_GO_LINK_D;
   11354 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11355 	}
   11356 
   11357 	/* Disable D0 LPLU. */
   11358 	wm_lplu_d0_disable(sc);
   11359 
   11360 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11361 	sc->sc_ctrl |= CTRL_SLU;
   11362 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11363 	    || (sc->sc_type > WM_T_82543)) {
   11364 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11365 	} else {
   11366 		sc->sc_ctrl &= ~CTRL_ASDE;
   11367 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11368 		if (ife->ifm_media & IFM_FDX)
   11369 			sc->sc_ctrl |= CTRL_FD;
   11370 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11371 		case IFM_10_T:
   11372 			sc->sc_ctrl |= CTRL_SPEED_10;
   11373 			break;
   11374 		case IFM_100_TX:
   11375 			sc->sc_ctrl |= CTRL_SPEED_100;
   11376 			break;
   11377 		case IFM_1000_T:
   11378 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11379 			break;
   11380 		case IFM_NONE:
   11381 			/* There is no specific setting for IFM_NONE */
   11382 			break;
   11383 		default:
   11384 			panic("wm_gmii_mediachange: bad media 0x%x",
   11385 			    ife->ifm_media);
   11386 		}
   11387 	}
   11388 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11389 	CSR_WRITE_FLUSH(sc);
   11390 
   11391 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11392 		wm_serdes_mediachange(ifp);
   11393 
   11394 	if (sc->sc_type <= WM_T_82543)
   11395 		wm_gmii_reset(sc);
   11396 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11397 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
   11399 		delay(300 * 1000);
   11400 		wm_gmii_reset(sc);
   11401 	}
   11402 
   11403 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11404 		return 0;
   11405 	return rc;
   11406 }
   11407 
   11408 /*
   11409  * wm_gmii_mediastatus:	[ifmedia interface function]
   11410  *
   11411  *	Get the current interface media status on a 1000BASE-T device.
   11412  */
   11413 static void
   11414 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11415 {
   11416 	struct wm_softc *sc = ifp->if_softc;
   11417 
   11418 	KASSERT(mutex_owned(sc->sc_core_lock));
   11419 
   11420 	ether_mediastatus(ifp, ifmr);
   11421 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11422 	    | sc->sc_flowflags;
   11423 }
   11424 
   11425 #define	MDI_IO		CTRL_SWDPIN(2)
   11426 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11427 #define	MDI_CLK		CTRL_SWDPIN(3)
   11428 
   11429 static void
   11430 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11431 {
   11432 	uint32_t i, v;
   11433 
   11434 	v = CSR_READ(sc, WMREG_CTRL);
   11435 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11436 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11437 
   11438 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11439 		if (data & i)
   11440 			v |= MDI_IO;
   11441 		else
   11442 			v &= ~MDI_IO;
   11443 		CSR_WRITE(sc, WMREG_CTRL, v);
   11444 		CSR_WRITE_FLUSH(sc);
   11445 		delay(10);
   11446 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11447 		CSR_WRITE_FLUSH(sc);
   11448 		delay(10);
   11449 		CSR_WRITE(sc, WMREG_CTRL, v);
   11450 		CSR_WRITE_FLUSH(sc);
   11451 		delay(10);
   11452 	}
   11453 }
   11454 
   11455 static uint16_t
   11456 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11457 {
   11458 	uint32_t v, i;
   11459 	uint16_t data = 0;
   11460 
   11461 	v = CSR_READ(sc, WMREG_CTRL);
   11462 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11463 	v |= CTRL_SWDPIO(3);
   11464 
   11465 	CSR_WRITE(sc, WMREG_CTRL, v);
   11466 	CSR_WRITE_FLUSH(sc);
   11467 	delay(10);
   11468 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11469 	CSR_WRITE_FLUSH(sc);
   11470 	delay(10);
   11471 	CSR_WRITE(sc, WMREG_CTRL, v);
   11472 	CSR_WRITE_FLUSH(sc);
   11473 	delay(10);
   11474 
   11475 	for (i = 0; i < 16; i++) {
   11476 		data <<= 1;
   11477 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11478 		CSR_WRITE_FLUSH(sc);
   11479 		delay(10);
   11480 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11481 			data |= 1;
   11482 		CSR_WRITE(sc, WMREG_CTRL, v);
   11483 		CSR_WRITE_FLUSH(sc);
   11484 		delay(10);
   11485 	}
   11486 
   11487 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11488 	CSR_WRITE_FLUSH(sc);
   11489 	delay(10);
   11490 	CSR_WRITE(sc, WMREG_CTRL, v);
   11491 	CSR_WRITE_FLUSH(sc);
   11492 	delay(10);
   11493 
   11494 	return data;
   11495 }
   11496 
   11497 #undef MDI_IO
   11498 #undef MDI_DIR
   11499 #undef MDI_CLK
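
/*
 * Illustrative sketch (kept out of the build; the function name is ours):
 * the raw frame the bit-bang helpers above shift out for a read.  A
 * 32-bit preamble of ones is followed by a 14-bit command word of
 * <start><op><phy><reg>, then 16 data bits are clocked back in.
 * wm_gmii_i82543_readreg() below is the real implementation of exactly
 * this sequence.
 */
#if 0
static uint16_t
wm_i82543_mii_read_sketch(struct wm_softc *sc, int phy, int reg)
{

	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);	/* Preamble */
	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	return wm_i82543_mii_recvbits(sc);		/* 16 data bits */
}
#endif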
   11500 
   11501 /*
   11502  * wm_gmii_i82543_readreg:	[mii interface function]
   11503  *
   11504  *	Read a PHY register on the GMII (i82543 version).
   11505  */
   11506 static int
   11507 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11508 {
   11509 	struct wm_softc *sc = device_private(dev);
   11510 
   11511 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11512 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11513 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11514 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11515 
   11516 	DPRINTF(sc, WM_DEBUG_GMII,
   11517 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11518 		device_xname(dev), phy, reg, *val));
   11519 
   11520 	return 0;
   11521 }
   11522 
   11523 /*
   11524  * wm_gmii_i82543_writereg:	[mii interface function]
   11525  *
   11526  *	Write a PHY register on the GMII (i82543 version).
   11527  */
   11528 static int
   11529 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11530 {
   11531 	struct wm_softc *sc = device_private(dev);
   11532 
   11533 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11534 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11535 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11536 	    (MII_COMMAND_START << 30), 32);
   11537 
   11538 	return 0;
   11539 }
   11540 
   11541 /*
   11542  * wm_gmii_mdic_readreg:	[mii interface function]
   11543  *
   11544  *	Read a PHY register on the GMII.
   11545  */
   11546 static int
   11547 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11548 {
   11549 	struct wm_softc *sc = device_private(dev);
   11550 	uint32_t mdic = 0;
   11551 	int i;
   11552 
   11553 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11554 	    && (reg > MII_ADDRMASK)) {
   11555 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11556 		    __func__, sc->sc_phytype, reg);
   11557 		reg &= MII_ADDRMASK;
   11558 	}
   11559 
   11560 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11561 	    MDIC_REGADD(reg));
   11562 
   11563 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11564 		delay(50);
   11565 		mdic = CSR_READ(sc, WMREG_MDIC);
   11566 		if (mdic & MDIC_READY)
   11567 			break;
   11568 	}
   11569 
   11570 	if ((mdic & MDIC_READY) == 0) {
   11571 		DPRINTF(sc, WM_DEBUG_GMII,
   11572 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11573 			device_xname(dev), phy, reg));
   11574 		return ETIMEDOUT;
   11575 	} else if (mdic & MDIC_E) {
   11576 		/* This is normal if no PHY is present. */
   11577 		DPRINTF(sc, WM_DEBUG_GMII,
   11578 		    ("%s: MDIC read error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
   11580 		return -1;
   11581 	} else
   11582 		*val = MDIC_DATA(mdic);
   11583 
   11584 	/*
   11585 	 * Allow some time after each MDIC transaction to avoid
   11586 	 * reading duplicate data in the next MDIC transaction.
   11587 	 */
   11588 	if (sc->sc_type == WM_T_PCH2)
   11589 		delay(100);
   11590 
   11591 	return 0;
   11592 }
   11593 
   11594 /*
   11595  * wm_gmii_mdic_writereg:	[mii interface function]
   11596  *
   11597  *	Write a PHY register on the GMII.
   11598  */
   11599 static int
   11600 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11601 {
   11602 	struct wm_softc *sc = device_private(dev);
   11603 	uint32_t mdic = 0;
   11604 	int i;
   11605 
   11606 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11607 	    && (reg > MII_ADDRMASK)) {
   11608 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11609 		    __func__, sc->sc_phytype, reg);
   11610 		reg &= MII_ADDRMASK;
   11611 	}
   11612 
   11613 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11614 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11615 
   11616 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11617 		delay(50);
   11618 		mdic = CSR_READ(sc, WMREG_MDIC);
   11619 		if (mdic & MDIC_READY)
   11620 			break;
   11621 	}
   11622 
   11623 	if ((mdic & MDIC_READY) == 0) {
   11624 		DPRINTF(sc, WM_DEBUG_GMII,
   11625 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11626 			device_xname(dev), phy, reg));
   11627 		return ETIMEDOUT;
   11628 	} else if (mdic & MDIC_E) {
   11629 		DPRINTF(sc, WM_DEBUG_GMII,
   11630 		    ("%s: MDIC write error: phy %d reg %d\n",
   11631 			device_xname(dev), phy, reg));
   11632 		return -1;
   11633 	}
   11634 
   11635 	/*
   11636 	 * Allow some time after each MDIC transaction to avoid
   11637 	 * reading duplicate data in the next MDIC transaction.
   11638 	 */
   11639 	if (sc->sc_type == WM_T_PCH2)
   11640 		delay(100);
   11641 
   11642 	return 0;
   11643 }
   11644 
   11645 /*
   11646  * wm_gmii_i82544_readreg:	[mii interface function]
   11647  *
   11648  *	Read a PHY register on the GMII.
   11649  */
   11650 static int
   11651 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11652 {
   11653 	struct wm_softc *sc = device_private(dev);
   11654 	int rv;
   11655 
   11656 	rv = sc->phy.acquire(sc);
   11657 	if (rv != 0) {
   11658 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11659 		return rv;
   11660 	}
   11661 
   11662 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11663 
   11664 	sc->phy.release(sc);
   11665 
   11666 	return rv;
   11667 }
   11668 
   11669 static int
   11670 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11671 {
   11672 	struct wm_softc *sc = device_private(dev);
   11673 	int rv;
   11674 
   11675 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11676 		switch (sc->sc_phytype) {
   11677 		case WMPHY_IGP:
   11678 		case WMPHY_IGP_2:
   11679 		case WMPHY_IGP_3:
   11680 			rv = wm_gmii_mdic_writereg(dev, phy,
   11681 			    IGPHY_PAGE_SELECT, reg);
   11682 			if (rv != 0)
   11683 				return rv;
   11684 			break;
   11685 		default:
   11686 #ifdef WM_DEBUG
   11687 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11688 			    __func__, sc->sc_phytype, reg);
   11689 #endif
   11690 			break;
   11691 		}
   11692 	}
   11693 
   11694 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11695 }
   11696 
   11697 /*
   11698  * wm_gmii_i82544_writereg:	[mii interface function]
   11699  *
   11700  *	Write a PHY register on the GMII.
   11701  */
   11702 static int
   11703 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11704 {
   11705 	struct wm_softc *sc = device_private(dev);
   11706 	int rv;
   11707 
   11708 	rv = sc->phy.acquire(sc);
   11709 	if (rv != 0) {
   11710 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11711 		return rv;
   11712 	}
   11713 
   11714 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11715 	sc->phy.release(sc);
   11716 
   11717 	return rv;
   11718 }
   11719 
   11720 static int
   11721 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11722 {
   11723 	struct wm_softc *sc = device_private(dev);
   11724 	int rv;
   11725 
   11726 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11727 		switch (sc->sc_phytype) {
   11728 		case WMPHY_IGP:
   11729 		case WMPHY_IGP_2:
   11730 		case WMPHY_IGP_3:
   11731 			rv = wm_gmii_mdic_writereg(dev, phy,
   11732 			    IGPHY_PAGE_SELECT, reg);
   11733 			if (rv != 0)
   11734 				return rv;
   11735 			break;
   11736 		default:
   11737 #ifdef WM_DEBUG
   11738 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11739 			    __func__, sc->sc_phytype, reg);
   11740 #endif
   11741 			break;
   11742 		}
   11743 	}
   11744 
   11745 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11746 }
   11747 
   11748 /*
   11749  * wm_gmii_i80003_readreg:	[mii interface function]
   11750  *
 *	Read a PHY register on the Kumeran bus (80003).
   11752  * This could be handled by the PHY layer if we didn't have to lock the
   11753  * resource ...
   11754  */
   11755 static int
   11756 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11757 {
   11758 	struct wm_softc *sc = device_private(dev);
   11759 	int page_select;
   11760 	uint16_t temp, temp2;
   11761 	int rv;
   11762 
   11763 	if (phy != 1) /* Only one PHY on kumeran bus */
   11764 		return -1;
   11765 
   11766 	rv = sc->phy.acquire(sc);
   11767 	if (rv != 0) {
   11768 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11769 		return rv;
   11770 	}
   11771 
   11772 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11773 		page_select = GG82563_PHY_PAGE_SELECT;
   11774 	else {
   11775 		/*
   11776 		 * Use Alternative Page Select register to access registers
   11777 		 * 30 and 31.
   11778 		 */
   11779 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11780 	}
   11781 	temp = reg >> GG82563_PAGE_SHIFT;
   11782 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11783 		goto out;
   11784 
   11785 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11786 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   11789 		 */
   11790 		delay(200);
   11791 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11792 		if ((rv != 0) || (temp2 != temp)) {
   11793 			device_printf(dev, "%s failed\n", __func__);
   11794 			rv = -1;
   11795 			goto out;
   11796 		}
   11797 		delay(200);
   11798 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11799 		delay(200);
   11800 	} else
   11801 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11802 
   11803 out:
   11804 	sc->phy.release(sc);
   11805 	return rv;
   11806 }
   11807 
   11808 /*
   11809  * wm_gmii_i80003_writereg:	[mii interface function]
   11810  *
 *	Write a PHY register on the Kumeran bus (80003).
   11812  * This could be handled by the PHY layer if we didn't have to lock the
   11813  * resource ...
   11814  */
   11815 static int
   11816 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11817 {
   11818 	struct wm_softc *sc = device_private(dev);
   11819 	int page_select, rv;
   11820 	uint16_t temp, temp2;
   11821 
   11822 	if (phy != 1) /* Only one PHY on kumeran bus */
   11823 		return -1;
   11824 
   11825 	rv = sc->phy.acquire(sc);
   11826 	if (rv != 0) {
   11827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11828 		return rv;
   11829 	}
   11830 
   11831 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11832 		page_select = GG82563_PHY_PAGE_SELECT;
   11833 	else {
   11834 		/*
   11835 		 * Use Alternative Page Select register to access registers
   11836 		 * 30 and 31.
   11837 		 */
   11838 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11839 	}
   11840 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11841 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11842 		goto out;
   11843 
   11844 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11845 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   11848 		 */
   11849 		delay(200);
   11850 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11851 		if ((rv != 0) || (temp2 != temp)) {
   11852 			device_printf(dev, "%s failed\n", __func__);
   11853 			rv = -1;
   11854 			goto out;
   11855 		}
   11856 		delay(200);
   11857 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11858 		delay(200);
   11859 	} else
   11860 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11861 
   11862 out:
   11863 	sc->phy.release(sc);
   11864 	return rv;
   11865 }
   11866 
   11867 /*
   11868  * wm_gmii_bm_readreg:	[mii interface function]
   11869  *
 *	Read a PHY register on the BM PHY.
   11871  * This could be handled by the PHY layer if we didn't have to lock the
   11872  * resource ...
   11873  */
   11874 static int
   11875 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11876 {
   11877 	struct wm_softc *sc = device_private(dev);
   11878 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11879 	int rv;
   11880 
   11881 	rv = sc->phy.acquire(sc);
   11882 	if (rv != 0) {
   11883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11884 		return rv;
   11885 	}
   11886 
   11887 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11888 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11889 		    || (reg == 31)) ? 1 : phy;
   11890 	/* Page 800 works differently than the rest so it has its own func */
   11891 	if (page == BM_WUC_PAGE) {
   11892 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11893 		goto release;
   11894 	}
   11895 
   11896 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11897 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11898 		    && (sc->sc_type != WM_T_82583))
   11899 			rv = wm_gmii_mdic_writereg(dev, phy,
   11900 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11901 		else
   11902 			rv = wm_gmii_mdic_writereg(dev, phy,
   11903 			    BME1000_PHY_PAGE_SELECT, page);
   11904 		if (rv != 0)
   11905 			goto release;
   11906 	}
   11907 
   11908 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11909 
   11910 release:
   11911 	sc->phy.release(sc);
   11912 	return rv;
   11913 }
   11914 
   11915 /*
   11916  * wm_gmii_bm_writereg:	[mii interface function]
   11917  *
 *	Write a PHY register on the BM PHY.
   11919  * This could be handled by the PHY layer if we didn't have to lock the
   11920  * resource ...
   11921  */
   11922 static int
   11923 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11924 {
   11925 	struct wm_softc *sc = device_private(dev);
   11926 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11927 	int rv;
   11928 
   11929 	rv = sc->phy.acquire(sc);
   11930 	if (rv != 0) {
   11931 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11932 		return rv;
   11933 	}
   11934 
   11935 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11936 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11937 		    || (reg == 31)) ? 1 : phy;
   11938 	/* Page 800 works differently than the rest so it has its own func */
   11939 	if (page == BM_WUC_PAGE) {
   11940 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11941 		goto release;
   11942 	}
   11943 
   11944 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11945 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11946 		    && (sc->sc_type != WM_T_82583))
   11947 			rv = wm_gmii_mdic_writereg(dev, phy,
   11948 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11949 		else
   11950 			rv = wm_gmii_mdic_writereg(dev, phy,
   11951 			    BME1000_PHY_PAGE_SELECT, page);
   11952 		if (rv != 0)
   11953 			goto release;
   11954 	}
   11955 
   11956 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11957 
   11958 release:
   11959 	sc->phy.release(sc);
   11960 	return rv;
   11961 }
   11962 
   11963 /*
   11964  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11965  *  @dev: pointer to the HW structure
   11966  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11967  *
   11968  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11969  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11970  */
   11971 static int
   11972 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11973 {
   11974 #ifdef WM_DEBUG
   11975 	struct wm_softc *sc = device_private(dev);
   11976 #endif
   11977 	uint16_t temp;
   11978 	int rv;
   11979 
   11980 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11981 		device_xname(dev), __func__));
   11982 
   11983 	if (!phy_regp)
   11984 		return -1;
   11985 
   11986 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11987 
   11988 	/* Select Port Control Registers page */
   11989 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11990 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11991 	if (rv != 0)
   11992 		return rv;
   11993 
   11994 	/* Read WUCE and save it */
   11995 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11996 	if (rv != 0)
   11997 		return rv;
   11998 
   11999 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12000 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12001 	 */
   12002 	temp = *phy_regp;
   12003 	temp |= BM_WUC_ENABLE_BIT;
   12004 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12005 
   12006 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12007 		return rv;
   12008 
   12009 	/* Select Host Wakeup Registers page - caller now able to write
   12010 	 * registers on the Wakeup registers page
   12011 	 */
   12012 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12013 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12014 }
   12015 
   12016 /*
   12017  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12018  *  @dev: pointer to the HW structure
   12019  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12020  *
   12021  *  Restore BM_WUC_ENABLE_REG to its original value.
   12022  *
   12023  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12024  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12025  *  caller.
   12026  */
   12027 static int
   12028 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12029 {
   12030 #ifdef WM_DEBUG
   12031 	struct wm_softc *sc = device_private(dev);
   12032 #endif
   12033 
   12034 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12035 		device_xname(dev), __func__));
   12036 
   12037 	if (!phy_regp)
   12038 		return -1;
   12039 
   12040 	/* Select Port Control Registers page */
   12041 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12042 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12043 
   12044 	/* Restore 769.17 to its original value */
   12045 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12046 
   12047 	return 0;
   12048 }
   12049 
   12050 /*
   12051  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   12052  *  @sc: pointer to the HW structure
   12053  *  @offset: register offset to be read or written
   12054  *  @val: pointer to the data to read or write
   12055  *  @rd: determines if operation is read or write
   12056  *  @page_set: BM_WUC_PAGE already set and access enabled
   12057  *
   12058  *  Read the PHY register at offset and store the retrieved information in
   12059  *  data, or write data to PHY register at offset.  Note the procedure to
   12060  *  access the PHY wakeup registers is different than reading the other PHY
   12061  *  registers. It works as such:
   12062  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 would be used for manageability)
   12064  *  3) Write the address using the address opcode (0x11)
   12065  *  4) Read or write the data using the data opcode (0x12)
   12066  *  5) Restore 769.17.2 to its original value
   12067  *
   12068  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12069  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12070  *
 *  Assumes the semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to
 *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   12074  */
   12075 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12077 	bool page_set)
   12078 {
   12079 	struct wm_softc *sc = device_private(dev);
   12080 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12081 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12082 	uint16_t wuce;
   12083 	int rv = 0;
   12084 
   12085 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12086 		device_xname(dev), __func__));
   12087 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12088 	if ((sc->sc_type == WM_T_PCH)
   12089 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12090 		device_printf(dev,
   12091 		    "Attempting to access page %d while gig enabled.\n", page);
   12092 	}
   12093 
   12094 	if (!page_set) {
   12095 		/* Enable access to PHY wakeup registers */
   12096 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12097 		if (rv != 0) {
   12098 			device_printf(dev,
   12099 			    "%s: Could not enable PHY wakeup reg access\n",
   12100 			    __func__);
   12101 			return rv;
   12102 		}
   12103 	}
   12104 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12105 		device_xname(sc->sc_dev), __func__, page, regnum));
   12106 
   12107 	/*
	 * Steps 3 and 4: access the PHY wakeup register using the address
	 * and data opcodes (see the function comment above).
   12110 	 */
   12111 
   12112 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12113 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12114 	if (rv != 0)
   12115 		return rv;
   12116 
   12117 	if (rd) {
   12118 		/* Read the Wakeup register page value using opcode 0x12 */
   12119 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12120 	} else {
   12121 		/* Write the Wakeup register page value using opcode 0x12 */
   12122 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12123 	}
   12124 	if (rv != 0)
   12125 		return rv;
   12126 
   12127 	if (!page_set)
   12128 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12129 
   12130 	return rv;
   12131 }
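
/*
 * Illustrative sketch (kept out of the build; the function name is ours):
 * a standalone wakeup register read using the helper above.  "offset"
 * must encode BM_WUC_PAGE in its page bits (see BM_PHY_REG_PAGE()).
 * With page_set == false the helper performs the whole
 * enable/access/disable sequence itself, so the caller only has to hold
 * the PHY semaphore.
 */
#if 0
static int
wm_read_wakeup_reg_sketch(struct wm_softc *sc, int offset, uint16_t *val)
{
	int rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	rv = wm_access_phy_wakeup_reg_bm(sc->sc_dev, offset, val, true,
	    false);
	sc->phy.release(sc);
	return rv;
}
#endif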
   12132 
   12133 /*
   12134  * wm_gmii_hv_readreg:	[mii interface function]
   12135  *
 *	Read a PHY register on the HV (PCH and newer) PHY.
   12137  * This could be handled by the PHY layer if we didn't have to lock the
   12138  * resource ...
   12139  */
   12140 static int
   12141 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12142 {
   12143 	struct wm_softc *sc = device_private(dev);
   12144 	int rv;
   12145 
   12146 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12147 		device_xname(dev), __func__));
   12148 
   12149 	rv = sc->phy.acquire(sc);
   12150 	if (rv != 0) {
   12151 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12152 		return rv;
   12153 	}
   12154 
   12155 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12156 	sc->phy.release(sc);
   12157 	return rv;
   12158 }
   12159 
   12160 static int
   12161 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12162 {
   12163 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12164 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12165 	int rv;
   12166 
   12167 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12168 
   12169 	/* Page 800 works differently than the rest so it has its own func */
   12170 	if (page == BM_WUC_PAGE)
   12171 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12172 
   12173 	/*
   12174 	 * Lower than page 768 works differently than the rest so it has its
   12175 	 * own func
   12176 	 */
   12177 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12178 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12179 		return -1;
   12180 	}
   12181 
   12182 	/*
   12183 	 * XXX I21[789] documents say that the SMBus Address register is at
   12184 	 * PHY address 01, Page 0 (not 768), Register 26.
   12185 	 */
   12186 	if (page == HV_INTC_FC_PAGE_START)
   12187 		page = 0;
   12188 
   12189 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12190 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12191 		    page << BME1000_PAGE_SHIFT);
   12192 		if (rv != 0)
   12193 			return rv;
   12194 	}
   12195 
   12196 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12197 }
   12198 
   12199 /*
   12200  * wm_gmii_hv_writereg:	[mii interface function]
   12201  *
 *	Write a PHY register on the HV (PCH and newer) PHY.
   12203  * This could be handled by the PHY layer if we didn't have to lock the
   12204  * resource ...
   12205  */
   12206 static int
   12207 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12208 {
   12209 	struct wm_softc *sc = device_private(dev);
   12210 	int rv;
   12211 
   12212 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12213 		device_xname(dev), __func__));
   12214 
   12215 	rv = sc->phy.acquire(sc);
   12216 	if (rv != 0) {
   12217 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12218 		return rv;
   12219 	}
   12220 
   12221 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12222 	sc->phy.release(sc);
   12223 
   12224 	return rv;
   12225 }
   12226 
   12227 static int
   12228 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12229 {
   12230 	struct wm_softc *sc = device_private(dev);
   12231 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12232 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12233 	int rv;
   12234 
   12235 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12236 
   12237 	/* Page 800 works differently than the rest so it has its own func */
   12238 	if (page == BM_WUC_PAGE)
   12239 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12240 		    false);
   12241 
   12242 	/*
   12243 	 * Lower than page 768 works differently than the rest so it has its
   12244 	 * own func
   12245 	 */
   12246 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12247 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12248 		return -1;
   12249 	}
   12250 
   12251 	{
   12252 		/*
   12253 		 * XXX I21[789] documents say that the SMBus Address register
   12254 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12255 		 */
   12256 		if (page == HV_INTC_FC_PAGE_START)
   12257 			page = 0;
   12258 
   12259 		/*
   12260 		 * XXX Workaround MDIO accesses being disabled after entering
		 * XXX Work around MDIO accesses being disabled after entering
   12262 		 * register is set)
   12263 		 */
   12264 		if (sc->sc_phytype == WMPHY_82578) {
   12265 			struct mii_softc *child;
   12266 
   12267 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12268 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12269 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12270 			    && ((val & (1 << 11)) != 0)) {
   12271 				device_printf(dev, "XXX need workaround\n");
   12272 			}
   12273 		}
   12274 
   12275 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12276 			rv = wm_gmii_mdic_writereg(dev, 1,
   12277 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12278 			if (rv != 0)
   12279 				return rv;
   12280 		}
   12281 	}
   12282 
   12283 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12284 }
   12285 
   12286 /*
   12287  * wm_gmii_82580_readreg:	[mii interface function]
   12288  *
   12289  *	Read a PHY register on the 82580 and I350.
   12290  * This could be handled by the PHY layer if we didn't have to lock the
   12291  * resource ...
   12292  */
   12293 static int
   12294 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12295 {
   12296 	struct wm_softc *sc = device_private(dev);
   12297 	int rv;
   12298 
   12299 	rv = sc->phy.acquire(sc);
   12300 	if (rv != 0) {
   12301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12302 		return rv;
   12303 	}
   12304 
   12305 #ifdef DIAGNOSTIC
   12306 	if (reg > MII_ADDRMASK) {
   12307 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12308 		    __func__, sc->sc_phytype, reg);
   12309 		reg &= MII_ADDRMASK;
   12310 	}
   12311 #endif
   12312 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12313 
   12314 	sc->phy.release(sc);
   12315 	return rv;
   12316 }
   12317 
   12318 /*
   12319  * wm_gmii_82580_writereg:	[mii interface function]
   12320  *
   12321  *	Write a PHY register on the 82580 and I350.
   12322  * This could be handled by the PHY layer if we didn't have to lock the
   12323  * resource ...
   12324  */
   12325 static int
   12326 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12327 {
   12328 	struct wm_softc *sc = device_private(dev);
   12329 	int rv;
   12330 
   12331 	rv = sc->phy.acquire(sc);
   12332 	if (rv != 0) {
   12333 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12334 		return rv;
   12335 	}
   12336 
   12337 #ifdef DIAGNOSTIC
   12338 	if (reg > MII_ADDRMASK) {
   12339 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12340 		    __func__, sc->sc_phytype, reg);
   12341 		reg &= MII_ADDRMASK;
   12342 	}
   12343 #endif
   12344 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12345 
   12346 	sc->phy.release(sc);
   12347 	return rv;
   12348 }
   12349 
   12350 /*
   12351  * wm_gmii_gs40g_readreg:	[mii interface function]
   12352  *
 *	Read a PHY register on the I210 and I211.
   12354  * This could be handled by the PHY layer if we didn't have to lock the
   12355  * resource ...
   12356  */
   12357 static int
   12358 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12359 {
   12360 	struct wm_softc *sc = device_private(dev);
   12361 	int page, offset;
   12362 	int rv;
   12363 
   12364 	/* Acquire semaphore */
   12365 	rv = sc->phy.acquire(sc);
   12366 	if (rv != 0) {
   12367 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12368 		return rv;
   12369 	}
   12370 
   12371 	/* Page select */
   12372 	page = reg >> GS40G_PAGE_SHIFT;
   12373 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12374 	if (rv != 0)
   12375 		goto release;
   12376 
   12377 	/* Read reg */
   12378 	offset = reg & GS40G_OFFSET_MASK;
   12379 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12380 
   12381 release:
   12382 	sc->phy.release(sc);
   12383 	return rv;
   12384 }
   12385 
   12386 /*
   12387  * wm_gmii_gs40g_writereg:	[mii interface function]
   12388  *
   12389  *	Write a PHY register on the I210 and I211.
   12390  * This could be handled by the PHY layer if we didn't have to lock the
   12391  * resource ...
   12392  */
   12393 static int
   12394 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12395 {
   12396 	struct wm_softc *sc = device_private(dev);
   12397 	uint16_t page;
   12398 	int offset, rv;
   12399 
   12400 	/* Acquire semaphore */
   12401 	rv = sc->phy.acquire(sc);
   12402 	if (rv != 0) {
   12403 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12404 		return rv;
   12405 	}
   12406 
   12407 	/* Page select */
   12408 	page = reg >> GS40G_PAGE_SHIFT;
   12409 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12410 	if (rv != 0)
   12411 		goto release;
   12412 
   12413 	/* Write reg */
   12414 	offset = reg & GS40G_OFFSET_MASK;
   12415 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12416 
   12417 release:
   12418 	/* Release semaphore */
   12419 	sc->phy.release(sc);
   12420 	return rv;
   12421 }
   12422 
   12423 /*
   12424  * wm_gmii_statchg:	[mii interface function]
   12425  *
   12426  *	Callback from MII layer when media changes.
   12427  */
   12428 static void
   12429 wm_gmii_statchg(struct ifnet *ifp)
   12430 {
   12431 	struct wm_softc *sc = ifp->if_softc;
   12432 	struct mii_data *mii = &sc->sc_mii;
   12433 
   12434 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12435 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12436 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12437 
   12438 	/* Get flow control negotiation result. */
   12439 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12440 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12441 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12442 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12443 	}
   12444 
   12445 	if (sc->sc_flowflags & IFM_FLOW) {
   12446 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12447 			sc->sc_ctrl |= CTRL_TFCE;
   12448 			sc->sc_fcrtl |= FCRTL_XONE;
   12449 		}
   12450 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12451 			sc->sc_ctrl |= CTRL_RFCE;
   12452 	}
   12453 
   12454 	if (mii->mii_media_active & IFM_FDX) {
   12455 		DPRINTF(sc, WM_DEBUG_LINK,
   12456 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12457 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12458 	} else {
   12459 		DPRINTF(sc, WM_DEBUG_LINK,
   12460 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12461 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12462 	}
   12463 
   12464 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12465 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12466 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12467 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12468 	if (sc->sc_type == WM_T_80003) {
   12469 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12470 		case IFM_1000_T:
   12471 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12472 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12473 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12474 			break;
   12475 		default:
   12476 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12477 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12478 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12479 			break;
   12480 		}
   12481 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12482 	}
   12483 }
   12484 
   12485 /* kumeran related (80003, ICH* and PCH*) */
   12486 
   12487 /*
   12488  * wm_kmrn_readreg:
   12489  *
   12490  *	Read a kumeran register
   12491  */
   12492 static int
   12493 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12494 {
   12495 	int rv;
   12496 
   12497 	if (sc->sc_type == WM_T_80003)
   12498 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12499 	else
   12500 		rv = sc->phy.acquire(sc);
   12501 	if (rv != 0) {
   12502 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12503 		    __func__);
   12504 		return rv;
   12505 	}
   12506 
   12507 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12508 
   12509 	if (sc->sc_type == WM_T_80003)
   12510 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12511 	else
   12512 		sc->phy.release(sc);
   12513 
   12514 	return rv;
   12515 }
   12516 
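          /*
           * The KUMCTRLSTA register provides indirect access to the kumeran
           * registers: write the register offset (with REN set for a read),
           * flush and wait 2us, and the data appears in the low 16 bits of
           * the same register.  The write path below works the same way,
           * but without REN.
           */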
   12517 static int
   12518 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12519 {
   12520 
   12521 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12522 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12523 	    KUMCTRLSTA_REN);
   12524 	CSR_WRITE_FLUSH(sc);
   12525 	delay(2);
   12526 
   12527 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12528 
   12529 	return 0;
   12530 }
   12531 
   12532 /*
   12533  * wm_kmrn_writereg:
   12534  *
   12535  *	Write a kumeran register
   12536  */
   12537 static int
   12538 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12539 {
   12540 	int rv;
   12541 
   12542 	if (sc->sc_type == WM_T_80003)
   12543 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12544 	else
   12545 		rv = sc->phy.acquire(sc);
   12546 	if (rv != 0) {
   12547 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12548 		    __func__);
   12549 		return rv;
   12550 	}
   12551 
   12552 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12553 
   12554 	if (sc->sc_type == WM_T_80003)
   12555 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12556 	else
   12557 		sc->phy.release(sc);
   12558 
   12559 	return rv;
   12560 }
   12561 
   12562 static int
   12563 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12564 {
   12565 
   12566 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12567 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12568 
   12569 	return 0;
   12570 }
   12571 
   12572 /*
   12573  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12574  * This access method is different from IEEE MMD.
   12575  */
   12576 static int
   12577 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12578 {
   12579 	struct wm_softc *sc = device_private(dev);
   12580 	int rv;
   12581 
   12582 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12583 	if (rv != 0)
   12584 		return rv;
   12585 
   12586 	if (rd)
   12587 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12588 	else
   12589 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12590 	return rv;
   12591 }
   12592 
   12593 static int
   12594 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12595 {
   12596 
   12597 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12598 }
   12599 
   12600 static int
   12601 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12602 {
   12603 
   12604 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12605 }
   12606 
   12607 /* SGMII related */
   12608 
   12609 /*
    12610  * wm_sgmii_uses_mdio:
   12611  *
   12612  * Check whether the transaction is to the internal PHY or the external
   12613  * MDIO interface. Return true if it's MDIO.
   12614  */
   12615 static bool
   12616 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12617 {
   12618 	uint32_t reg;
   12619 	bool ismdio = false;
   12620 
   12621 	switch (sc->sc_type) {
   12622 	case WM_T_82575:
   12623 	case WM_T_82576:
   12624 		reg = CSR_READ(sc, WMREG_MDIC);
   12625 		ismdio = ((reg & MDIC_DEST) != 0);
   12626 		break;
   12627 	case WM_T_82580:
   12628 	case WM_T_I350:
   12629 	case WM_T_I354:
   12630 	case WM_T_I210:
   12631 	case WM_T_I211:
   12632 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12633 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12634 		break;
   12635 	default:
   12636 		break;
   12637 	}
   12638 
   12639 	return ismdio;
   12640 }
   12641 
   12642 /* Setup internal SGMII PHY for SFP */
   12643 static void
   12644 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12645 {
   12646 	uint16_t id1, id2, phyreg;
   12647 	int i, rv;
   12648 
   12649 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12650 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12651 		return;
   12652 
   12653 	for (i = 0; i < MII_NPHY; i++) {
   12654 		sc->phy.no_errprint = true;
   12655 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12656 		if (rv != 0)
   12657 			continue;
   12658 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12659 		if (rv != 0)
   12660 			continue;
   12661 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12662 			continue;
   12663 		sc->phy.no_errprint = false;
   12664 
   12665 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12666 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12667 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12668 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12669 		break;
   12670 	}
   12672 }
   12673 
   12674 /*
   12675  * wm_sgmii_readreg:	[mii interface function]
   12676  *
   12677  *	Read a PHY register on the SGMII
   12678  * This could be handled by the PHY layer if we didn't have to lock the
   12679  * resource ...
   12680  */
   12681 static int
   12682 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12683 {
   12684 	struct wm_softc *sc = device_private(dev);
   12685 	int rv;
   12686 
   12687 	rv = sc->phy.acquire(sc);
   12688 	if (rv != 0) {
   12689 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12690 		return rv;
   12691 	}
   12692 
   12693 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12694 
   12695 	sc->phy.release(sc);
   12696 	return rv;
   12697 }
   12698 
   12699 static int
   12700 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12701 {
   12702 	struct wm_softc *sc = device_private(dev);
   12703 	uint32_t i2ccmd;
   12704 	int i, rv = 0;
   12705 
   12706 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12707 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12708 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12709 
   12710 	/* Poll the ready bit */
   12711 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12712 		delay(50);
   12713 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12714 		if (i2ccmd & I2CCMD_READY)
   12715 			break;
   12716 	}
   12717 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12718 		device_printf(dev, "I2CCMD Read did not complete\n");
   12719 		rv = ETIMEDOUT;
   12720 	}
   12721 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12722 		if (!sc->phy.no_errprint)
   12723 			device_printf(dev, "I2CCMD Error bit set\n");
   12724 		rv = EIO;
   12725 	}
   12726 
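          	/* The data is returned byte-swapped in I2CCMD; swap it back. */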
   12727 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12728 
   12729 	return rv;
   12730 }
   12731 
   12732 /*
   12733  * wm_sgmii_writereg:	[mii interface function]
   12734  *
   12735  *	Write a PHY register on the SGMII.
   12736  * This could be handled by the PHY layer if we didn't have to lock the
   12737  * resource ...
   12738  */
   12739 static int
   12740 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12741 {
   12742 	struct wm_softc *sc = device_private(dev);
   12743 	int rv;
   12744 
   12745 	rv = sc->phy.acquire(sc);
   12746 	if (rv != 0) {
   12747 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12748 		return rv;
   12749 	}
   12750 
   12751 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12752 
   12753 	sc->phy.release(sc);
   12754 
   12755 	return rv;
   12756 }
   12757 
   12758 static int
   12759 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12760 {
   12761 	struct wm_softc *sc = device_private(dev);
   12762 	uint32_t i2ccmd;
   12763 	uint16_t swapdata;
   12764 	int rv = 0;
   12765 	int i;
   12766 
   12767 	/* Swap the data bytes for the I2C interface */
   12768 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12769 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12770 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12771 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12772 
   12773 	/* Poll the ready bit */
   12774 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12775 		delay(50);
   12776 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12777 		if (i2ccmd & I2CCMD_READY)
   12778 			break;
   12779 	}
   12780 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12781 		device_printf(dev, "I2CCMD Write did not complete\n");
   12782 		rv = ETIMEDOUT;
   12783 	}
   12784 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12785 		device_printf(dev, "I2CCMD Error bit set\n");
   12786 		rv = EIO;
   12787 	}
   12788 
   12789 	return rv;
   12790 }
   12791 
   12792 /* TBI related */
   12793 
   12794 static bool
   12795 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12796 {
   12797 	bool sig;
   12798 
   12799 	sig = ctrl & CTRL_SWDPIN(1);
   12800 
   12801 	/*
   12802 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12803 	 * detect a signal, 1 if they don't.
   12804 	 */
   12805 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12806 		sig = !sig;
   12807 
   12808 	return sig;
   12809 }
   12810 
   12811 /*
   12812  * wm_tbi_mediainit:
   12813  *
   12814  *	Initialize media for use on 1000BASE-X devices.
   12815  */
   12816 static void
   12817 wm_tbi_mediainit(struct wm_softc *sc)
   12818 {
   12819 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12820 	const char *sep = "";
   12821 
   12822 	if (sc->sc_type < WM_T_82543)
   12823 		sc->sc_tipg = TIPG_WM_DFLT;
   12824 	else
   12825 		sc->sc_tipg = TIPG_LG_DFLT;
   12826 
   12827 	sc->sc_tbi_serdes_anegticks = 5;
   12828 
   12829 	/* Initialize our media structures */
   12830 	sc->sc_mii.mii_ifp = ifp;
   12831 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12832 
   12833 	ifp->if_baudrate = IF_Gbps(1);
   12834 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12835 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12836 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12837 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12838 		    sc->sc_core_lock);
   12839 	} else {
   12840 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12841 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12842 	}
   12843 
   12844 	/*
   12845 	 * SWD Pins:
   12846 	 *
   12847 	 *	0 = Link LED (output)
   12848 	 *	1 = Loss Of Signal (input)
   12849 	 */
   12850 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12851 
   12852 	/* XXX Perhaps this is only for TBI */
   12853 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12854 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12855 
   12856 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12857 		sc->sc_ctrl &= ~CTRL_LRST;
   12858 
   12859 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12860 
   12861 #define	ADD(ss, mm, dd)							  \
   12862 do {									  \
   12863 	aprint_normal("%s%s", sep, ss);					  \
   12864 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12865 	sep = ", ";							  \
   12866 } while (/*CONSTCOND*/0)
   12867 
   12868 	aprint_normal_dev(sc->sc_dev, "");
   12869 
   12870 	if (sc->sc_type == WM_T_I354) {
   12871 		uint32_t status;
   12872 
   12873 		status = CSR_READ(sc, WMREG_STATUS);
   12874 		if (((status & STATUS_2P5_SKU) != 0)
   12875 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12876 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
    12877 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
          			    ANAR_X_FD);
   12878 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
    12879 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
          			    ANAR_X_FD);
   12880 		/* Only 82545 is LX (XXX except SFP) */
   12881 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12882 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12883 	} else if (sc->sc_sfptype != 0) {
   12884 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12885 		switch (sc->sc_sfptype) {
   12886 		default:
   12887 		case SFF_SFP_ETH_FLAGS_1000SX:
   12888 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12889 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12890 			break;
   12891 		case SFF_SFP_ETH_FLAGS_1000LX:
   12892 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12893 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12894 			break;
   12895 		case SFF_SFP_ETH_FLAGS_1000CX:
   12896 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12897 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12898 			break;
   12899 		case SFF_SFP_ETH_FLAGS_1000T:
   12900 			ADD("1000baseT", IFM_1000_T, 0);
   12901 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12902 			break;
   12903 		case SFF_SFP_ETH_FLAGS_100FX:
   12904 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12905 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12906 			break;
   12907 		}
   12908 	} else {
   12909 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12910 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12911 	}
   12912 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12913 	aprint_normal("\n");
   12914 
   12915 #undef ADD
   12916 
   12917 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12918 }
   12919 
   12920 /*
   12921  * wm_tbi_mediachange:	[ifmedia interface function]
   12922  *
   12923  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12924  */
   12925 static int
   12926 wm_tbi_mediachange(struct ifnet *ifp)
   12927 {
   12928 	struct wm_softc *sc = ifp->if_softc;
   12929 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12930 	uint32_t status, ctrl;
   12931 	bool signal;
   12932 	int i;
   12933 
   12934 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12935 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12936 		/* XXX need some work for >= 82571 and < 82575 */
   12937 		if (sc->sc_type < WM_T_82575)
   12938 			return 0;
   12939 	}
   12940 
   12941 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12942 	    || (sc->sc_type >= WM_T_82575))
   12943 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12944 
   12945 	sc->sc_ctrl &= ~CTRL_LRST;
   12946 	sc->sc_txcw = TXCW_ANE;
   12947 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12948 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12949 	else if (ife->ifm_media & IFM_FDX)
   12950 		sc->sc_txcw |= TXCW_FD;
   12951 	else
   12952 		sc->sc_txcw |= TXCW_HD;
   12953 
   12954 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12955 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12956 
    12957 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12958 		device_xname(sc->sc_dev), sc->sc_txcw));
   12959 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12960 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12961 	CSR_WRITE_FLUSH(sc);
   12962 	delay(1000);
   12963 
   12964 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12965 	signal = wm_tbi_havesignal(sc, ctrl);
   12966 
   12967 	DPRINTF(sc, WM_DEBUG_LINK,
   12968 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12969 
   12970 	if (signal) {
   12971 		/* Have signal; wait for the link to come up. */
   12972 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12973 			delay(10000);
   12974 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12975 				break;
   12976 		}
   12977 
   12978 		DPRINTF(sc, WM_DEBUG_LINK,
   12979 		    ("%s: i = %d after waiting for link\n",
   12980 			device_xname(sc->sc_dev), i));
   12981 
   12982 		status = CSR_READ(sc, WMREG_STATUS);
   12983 		DPRINTF(sc, WM_DEBUG_LINK,
   12984 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12985 			__PRIxBIT "\n",
   12986 			device_xname(sc->sc_dev), status, STATUS_LU));
   12987 		if (status & STATUS_LU) {
   12988 			/* Link is up. */
   12989 			DPRINTF(sc, WM_DEBUG_LINK,
   12990 			    ("%s: LINK: set media -> link up %s\n",
   12991 				device_xname(sc->sc_dev),
   12992 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12993 
   12994 			/*
    12995 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    12996 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   12997 			 */
   12998 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12999 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13000 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13001 			if (status & STATUS_FD)
   13002 				sc->sc_tctl |=
   13003 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13004 			else
   13005 				sc->sc_tctl |=
   13006 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13007 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13008 				sc->sc_fcrtl |= FCRTL_XONE;
   13009 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13010 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13011 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13012 			sc->sc_tbi_linkup = 1;
   13013 		} else {
   13014 			if (i == WM_LINKUP_TIMEOUT)
   13015 				wm_check_for_link(sc);
   13016 			/* Link is down. */
   13017 			DPRINTF(sc, WM_DEBUG_LINK,
   13018 			    ("%s: LINK: set media -> link down\n",
   13019 				device_xname(sc->sc_dev)));
   13020 			sc->sc_tbi_linkup = 0;
   13021 		}
   13022 	} else {
   13023 		DPRINTF(sc, WM_DEBUG_LINK,
   13024 		    ("%s: LINK: set media -> no signal\n",
   13025 			device_xname(sc->sc_dev)));
   13026 		sc->sc_tbi_linkup = 0;
   13027 	}
   13028 
   13029 	wm_tbi_serdes_set_linkled(sc);
   13030 
   13031 	return 0;
   13032 }
   13033 
   13034 /*
   13035  * wm_tbi_mediastatus:	[ifmedia interface function]
   13036  *
   13037  *	Get the current interface media status on a 1000BASE-X device.
   13038  */
   13039 static void
   13040 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13041 {
   13042 	struct wm_softc *sc = ifp->if_softc;
   13043 	uint32_t ctrl, status;
   13044 
   13045 	ifmr->ifm_status = IFM_AVALID;
   13046 	ifmr->ifm_active = IFM_ETHER;
   13047 
   13048 	status = CSR_READ(sc, WMREG_STATUS);
   13049 	if ((status & STATUS_LU) == 0) {
   13050 		ifmr->ifm_active |= IFM_NONE;
   13051 		return;
   13052 	}
   13053 
   13054 	ifmr->ifm_status |= IFM_ACTIVE;
   13055 	/* Only 82545 is LX */
   13056 	if (sc->sc_type == WM_T_82545)
   13057 		ifmr->ifm_active |= IFM_1000_LX;
   13058 	else
   13059 		ifmr->ifm_active |= IFM_1000_SX;
   13060 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13061 		ifmr->ifm_active |= IFM_FDX;
   13062 	else
   13063 		ifmr->ifm_active |= IFM_HDX;
   13064 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13065 	if (ctrl & CTRL_RFCE)
   13066 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13067 	if (ctrl & CTRL_TFCE)
   13068 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13069 }
   13070 
   13071 /* XXX TBI only */
   13072 static int
   13073 wm_check_for_link(struct wm_softc *sc)
   13074 {
   13075 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13076 	uint32_t rxcw;
   13077 	uint32_t ctrl;
   13078 	uint32_t status;
   13079 	bool signal;
   13080 
   13081 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13082 		device_xname(sc->sc_dev), __func__));
   13083 
   13084 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13085 		/* XXX need some work for >= 82571 */
   13086 		if (sc->sc_type >= WM_T_82571) {
   13087 			sc->sc_tbi_linkup = 1;
   13088 			return 0;
   13089 		}
   13090 	}
   13091 
   13092 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13093 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13094 	status = CSR_READ(sc, WMREG_STATUS);
   13095 	signal = wm_tbi_havesignal(sc, ctrl);
   13096 
   13097 	DPRINTF(sc, WM_DEBUG_LINK,
   13098 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13099 		device_xname(sc->sc_dev), __func__, signal,
   13100 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13101 
   13102 	/*
   13103 	 * SWDPIN   LU RXCW
   13104 	 *	0    0	  0
   13105 	 *	0    0	  1	(should not happen)
   13106 	 *	0    1	  0	(should not happen)
   13107 	 *	0    1	  1	(should not happen)
   13108 	 *	1    0	  0	Disable autonego and force linkup
   13109 	 *	1    0	  1	got /C/ but not linkup yet
   13110 	 *	1    1	  0	(linkup)
   13111 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13113 	 */
   13114 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13115 		DPRINTF(sc, WM_DEBUG_LINK,
   13116 		    ("%s: %s: force linkup and fullduplex\n",
   13117 			device_xname(sc->sc_dev), __func__));
   13118 		sc->sc_tbi_linkup = 0;
   13119 		/* Disable auto-negotiation in the TXCW register */
   13120 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13121 
   13122 		/*
   13123 		 * Force link-up and also force full-duplex.
   13124 		 *
    13125 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    13126 		 * automatically, so base sc->sc_ctrl on the fresh value.
   13127 		 */
   13128 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13129 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13130 	} else if (((status & STATUS_LU) != 0)
   13131 	    && ((rxcw & RXCW_C) != 0)
   13132 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13133 		sc->sc_tbi_linkup = 1;
   13134 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13135 			device_xname(sc->sc_dev), __func__));
   13136 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13137 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13138 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   13139 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   13140 			device_xname(sc->sc_dev), __func__));
   13141 	} else {
   13142 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13143 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13144 			status));
   13145 	}
   13146 
   13147 	return 0;
   13148 }
   13149 
   13150 /*
   13151  * wm_tbi_tick:
   13152  *
   13153  *	Check the link on TBI devices.
   13154  *	This function acts as mii_tick().
   13155  */
   13156 static void
   13157 wm_tbi_tick(struct wm_softc *sc)
   13158 {
   13159 	struct mii_data *mii = &sc->sc_mii;
   13160 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13161 	uint32_t status;
   13162 
   13163 	KASSERT(mutex_owned(sc->sc_core_lock));
   13164 
   13165 	status = CSR_READ(sc, WMREG_STATUS);
   13166 
   13167 	/* XXX is this needed? */
   13168 	(void)CSR_READ(sc, WMREG_RXCW);
   13169 	(void)CSR_READ(sc, WMREG_CTRL);
   13170 
   13171 	/* set link status */
   13172 	if ((status & STATUS_LU) == 0) {
   13173 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13174 			device_xname(sc->sc_dev)));
   13175 		sc->sc_tbi_linkup = 0;
   13176 	} else if (sc->sc_tbi_linkup == 0) {
   13177 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13178 			device_xname(sc->sc_dev),
   13179 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13180 		sc->sc_tbi_linkup = 1;
   13181 		sc->sc_tbi_serdes_ticks = 0;
   13182 	}
   13183 
   13184 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13185 		goto setled;
   13186 
   13187 	if ((status & STATUS_LU) == 0) {
   13188 		sc->sc_tbi_linkup = 0;
   13189 		/* If the timer expired, retry autonegotiation */
   13190 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13191 		    && (++sc->sc_tbi_serdes_ticks
   13192 			>= sc->sc_tbi_serdes_anegticks)) {
   13193 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13194 				device_xname(sc->sc_dev), __func__));
   13195 			sc->sc_tbi_serdes_ticks = 0;
   13196 			/*
   13197 			 * Reset the link, and let autonegotiation do
   13198 			 * its thing
   13199 			 */
   13200 			sc->sc_ctrl |= CTRL_LRST;
   13201 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13202 			CSR_WRITE_FLUSH(sc);
   13203 			delay(1000);
   13204 			sc->sc_ctrl &= ~CTRL_LRST;
   13205 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13206 			CSR_WRITE_FLUSH(sc);
   13207 			delay(1000);
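          			/*
          			 * Rewrite TXCW with ANE cleared first so that
          			 * the following write with ANE set restarts
          			 * autonegotiation.
          			 */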
   13208 			CSR_WRITE(sc, WMREG_TXCW,
   13209 			    sc->sc_txcw & ~TXCW_ANE);
   13210 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13211 		}
   13212 	}
   13213 
   13214 setled:
   13215 	wm_tbi_serdes_set_linkled(sc);
   13216 }
   13217 
   13218 /* SERDES related */
   13219 static void
   13220 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13221 {
   13222 	uint32_t reg;
   13223 
   13224 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13225 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13226 		return;
   13227 
   13228 	/* Enable PCS to turn on link */
   13229 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13230 	reg |= PCS_CFG_PCS_EN;
   13231 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13232 
   13233 	/* Power up the laser */
   13234 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13235 	reg &= ~CTRL_EXT_SWDPIN(3);
   13236 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13237 
   13238 	/* Flush the write to verify completion */
   13239 	CSR_WRITE_FLUSH(sc);
   13240 	delay(1000);
   13241 }
   13242 
   13243 static int
   13244 wm_serdes_mediachange(struct ifnet *ifp)
   13245 {
   13246 	struct wm_softc *sc = ifp->if_softc;
   13247 	bool pcs_autoneg = true; /* XXX */
   13248 	uint32_t ctrl_ext, pcs_lctl, reg;
   13249 
   13250 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13251 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13252 		return 0;
   13253 
   13254 	/* XXX Currently, this function is not called on 8257[12] */
   13255 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13256 	    || (sc->sc_type >= WM_T_82575))
   13257 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13258 
   13259 	/* Power on the sfp cage if present */
   13260 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13261 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13262 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13263 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13264 
   13265 	sc->sc_ctrl |= CTRL_SLU;
   13266 
   13267 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13268 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13269 
   13270 		reg = CSR_READ(sc, WMREG_CONNSW);
   13271 		reg |= CONNSW_ENRGSRC;
   13272 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13273 	}
   13274 
   13275 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13276 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13277 	case CTRL_EXT_LINK_MODE_SGMII:
   13278 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13279 		pcs_autoneg = true;
   13280 		/* Autoneg time out should be disabled for SGMII mode */
   13281 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13282 		break;
   13283 	case CTRL_EXT_LINK_MODE_1000KX:
   13284 		pcs_autoneg = false;
   13285 		/* FALLTHROUGH */
   13286 	default:
   13287 		if ((sc->sc_type == WM_T_82575)
   13288 		    || (sc->sc_type == WM_T_82576)) {
   13289 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13290 				pcs_autoneg = false;
   13291 		}
   13292 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13293 		    | CTRL_FRCFDX;
   13294 
   13295 		/* Set speed of 1000/Full if speed/duplex is forced */
   13296 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13297 	}
   13298 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13299 
   13300 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13301 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13302 
   13303 	if (pcs_autoneg) {
   13304 		/* Set PCS register for autoneg */
   13305 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13306 
   13307 		/* Disable force flow control for autoneg */
   13308 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13309 
   13310 		/* Configure flow control advertisement for autoneg */
   13311 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13312 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13313 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13314 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13315 	} else
   13316 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13317 
   13318 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13319 
   13320 	return 0;
   13321 }
   13322 
   13323 static void
   13324 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13325 {
   13326 	struct wm_softc *sc = ifp->if_softc;
   13327 	struct mii_data *mii = &sc->sc_mii;
   13328 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13329 	uint32_t pcs_adv, pcs_lpab, reg;
   13330 
   13331 	ifmr->ifm_status = IFM_AVALID;
   13332 	ifmr->ifm_active = IFM_ETHER;
   13333 
   13334 	/* Check PCS */
   13335 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13336 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13337 		ifmr->ifm_active |= IFM_NONE;
   13338 		sc->sc_tbi_linkup = 0;
   13339 		goto setled;
   13340 	}
   13341 
   13342 	sc->sc_tbi_linkup = 1;
   13343 	ifmr->ifm_status |= IFM_ACTIVE;
   13344 	if (sc->sc_type == WM_T_I354) {
   13345 		uint32_t status;
   13346 
   13347 		status = CSR_READ(sc, WMREG_STATUS);
   13348 		if (((status & STATUS_2P5_SKU) != 0)
   13349 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13350 			ifmr->ifm_active |= IFM_2500_KX;
   13351 		} else
   13352 			ifmr->ifm_active |= IFM_1000_KX;
   13353 	} else {
   13354 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13355 		case PCS_LSTS_SPEED_10:
   13356 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13357 			break;
   13358 		case PCS_LSTS_SPEED_100:
   13359 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13360 			break;
   13361 		case PCS_LSTS_SPEED_1000:
   13362 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13363 			break;
   13364 		default:
   13365 			device_printf(sc->sc_dev, "Unknown speed\n");
   13366 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13367 			break;
   13368 		}
   13369 	}
   13370 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13371 	if ((reg & PCS_LSTS_FDX) != 0)
   13372 		ifmr->ifm_active |= IFM_FDX;
   13373 	else
   13374 		ifmr->ifm_active |= IFM_HDX;
   13375 	mii->mii_media_active &= ~IFM_ETH_FMASK;
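          	/*
          	 * Resolve the pause ability as in IEEE 802.3 annex 28B:
          	 * symmetric pause when both sides advertise SYM, otherwise
          	 * one-directional pause when one side's SYM+ASYM pairs with
          	 * the link partner's ASYM.
          	 */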
   13376 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13377 		/* Check flow */
   13378 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13379 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13380 			DPRINTF(sc, WM_DEBUG_LINK,
   13381 			    ("XXX LINKOK but not ACOMP\n"));
   13382 			goto setled;
   13383 		}
   13384 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13385 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13386 		DPRINTF(sc, WM_DEBUG_LINK,
   13387 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13388 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13389 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13390 			mii->mii_media_active |= IFM_FLOW
   13391 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13392 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13393 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13394 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13395 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13396 			mii->mii_media_active |= IFM_FLOW
   13397 			    | IFM_ETH_TXPAUSE;
   13398 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13399 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13400 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13401 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13402 			mii->mii_media_active |= IFM_FLOW
   13403 			    | IFM_ETH_RXPAUSE;
   13404 		}
   13405 	}
   13406 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13407 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13408 setled:
   13409 	wm_tbi_serdes_set_linkled(sc);
   13410 }
   13411 
   13412 /*
   13413  * wm_serdes_tick:
   13414  *
   13415  *	Check the link on serdes devices.
   13416  */
   13417 static void
   13418 wm_serdes_tick(struct wm_softc *sc)
   13419 {
   13420 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13421 	struct mii_data *mii = &sc->sc_mii;
   13422 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13423 	uint32_t reg;
   13424 
   13425 	KASSERT(mutex_owned(sc->sc_core_lock));
   13426 
   13427 	mii->mii_media_status = IFM_AVALID;
   13428 	mii->mii_media_active = IFM_ETHER;
   13429 
   13430 	/* Check PCS */
   13431 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13432 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13433 		mii->mii_media_status |= IFM_ACTIVE;
   13434 		sc->sc_tbi_linkup = 1;
   13435 		sc->sc_tbi_serdes_ticks = 0;
   13436 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13437 		if ((reg & PCS_LSTS_FDX) != 0)
   13438 			mii->mii_media_active |= IFM_FDX;
   13439 		else
   13440 			mii->mii_media_active |= IFM_HDX;
   13441 	} else {
   13442 		mii->mii_media_status |= IFM_NONE;
   13443 		sc->sc_tbi_linkup = 0;
   13444 		/* If the timer expired, retry autonegotiation */
   13445 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13446 		    && (++sc->sc_tbi_serdes_ticks
   13447 			>= sc->sc_tbi_serdes_anegticks)) {
   13448 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13449 				device_xname(sc->sc_dev), __func__));
   13450 			sc->sc_tbi_serdes_ticks = 0;
   13451 			/* XXX */
   13452 			wm_serdes_mediachange(ifp);
   13453 		}
   13454 	}
   13455 
   13456 	wm_tbi_serdes_set_linkled(sc);
   13457 }
   13458 
   13459 /* SFP related */
   13460 
   13461 static int
   13462 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13463 {
   13464 	uint32_t i2ccmd;
   13465 	int i;
   13466 
   13467 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13468 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13469 
   13470 	/* Poll the ready bit */
   13471 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13472 		delay(50);
   13473 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13474 		if (i2ccmd & I2CCMD_READY)
   13475 			break;
   13476 	}
   13477 	if ((i2ccmd & I2CCMD_READY) == 0)
   13478 		return -1;
   13479 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13480 		return -1;
   13481 
   13482 	*data = i2ccmd & 0x00ff;
   13483 
   13484 	return 0;
   13485 }
   13486 
   13487 static uint32_t
   13488 wm_sfp_get_media_type(struct wm_softc *sc)
   13489 {
   13490 	uint32_t ctrl_ext;
   13491 	uint8_t val = 0;
   13492 	int timeout = 3;
   13493 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13494 	int rv = -1;
   13495 
   13496 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13497 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13498 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13499 	CSR_WRITE_FLUSH(sc);
   13500 
   13501 	/* Read SFP module data */
   13502 	while (timeout) {
   13503 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13504 		if (rv == 0)
   13505 			break;
   13506 		delay(100*1000); /* XXX too big */
   13507 		timeout--;
   13508 	}
   13509 	if (rv != 0)
   13510 		goto out;
   13511 
   13512 	switch (val) {
   13513 	case SFF_SFP_ID_SFF:
   13514 		aprint_normal_dev(sc->sc_dev,
   13515 		    "Module/Connector soldered to board\n");
   13516 		break;
   13517 	case SFF_SFP_ID_SFP:
   13518 		sc->sc_flags |= WM_F_SFP;
   13519 		break;
   13520 	case SFF_SFP_ID_UNKNOWN:
   13521 		goto out;
   13522 	default:
   13523 		break;
   13524 	}
   13525 
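          	/*
          	 * The byte at SFF_SFP_ETH_FLAGS_OFF holds the module's
          	 * Ethernet compliance codes (SFF-8472), which identify
          	 * the media type.
          	 */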
   13526 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13527 	if (rv != 0)
   13528 		goto out;
   13529 
   13530 	sc->sc_sfptype = val;
   13531 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13532 		mediatype = WM_MEDIATYPE_SERDES;
   13533 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13534 		sc->sc_flags |= WM_F_SGMII;
   13535 		mediatype = WM_MEDIATYPE_COPPER;
   13536 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13537 		sc->sc_flags |= WM_F_SGMII;
   13538 		mediatype = WM_MEDIATYPE_SERDES;
   13539 	} else {
   13540 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13541 		    __func__, sc->sc_sfptype);
   13542 		sc->sc_sfptype = 0; /* XXX unknown */
   13543 	}
   13544 
   13545 out:
   13546 	/* Restore I2C interface setting */
   13547 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13548 
   13549 	return mediatype;
   13550 }
   13551 
   13552 /*
   13553  * NVM related.
   13554  * Microwire, SPI (w/wo EERD) and Flash.
   13555  */
   13556 
   13557 /* Both spi and uwire */
   13558 
   13559 /*
   13560  * wm_eeprom_sendbits:
   13561  *
   13562  *	Send a series of bits to the EEPROM.
   13563  */
   13564 static void
   13565 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13566 {
   13567 	uint32_t reg;
   13568 	int x;
   13569 
   13570 	reg = CSR_READ(sc, WMREG_EECD);
   13571 
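          	/*
          	 * Clock each bit out, most significant bit first: drive DI,
          	 * then pulse SK high and low, holding each phase for 2us.
          	 */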
   13572 	for (x = nbits; x > 0; x--) {
   13573 		if (bits & (1U << (x - 1)))
   13574 			reg |= EECD_DI;
   13575 		else
   13576 			reg &= ~EECD_DI;
   13577 		CSR_WRITE(sc, WMREG_EECD, reg);
   13578 		CSR_WRITE_FLUSH(sc);
   13579 		delay(2);
   13580 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13581 		CSR_WRITE_FLUSH(sc);
   13582 		delay(2);
   13583 		CSR_WRITE(sc, WMREG_EECD, reg);
   13584 		CSR_WRITE_FLUSH(sc);
   13585 		delay(2);
   13586 	}
   13587 }
   13588 
   13589 /*
   13590  * wm_eeprom_recvbits:
   13591  *
   13592  *	Receive a series of bits from the EEPROM.
   13593  */
   13594 static void
   13595 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13596 {
   13597 	uint32_t reg, val;
   13598 	int x;
   13599 
   13600 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13601 
   13602 	val = 0;
   13603 	for (x = nbits; x > 0; x--) {
   13604 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13605 		CSR_WRITE_FLUSH(sc);
   13606 		delay(2);
   13607 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13608 			val |= (1U << (x - 1));
   13609 		CSR_WRITE(sc, WMREG_EECD, reg);
   13610 		CSR_WRITE_FLUSH(sc);
   13611 		delay(2);
   13612 	}
   13613 	*valp = val;
   13614 }
   13615 
   13616 /* Microwire */
   13617 
   13618 /*
   13619  * wm_nvm_read_uwire:
   13620  *
   13621  *	Read a word from the EEPROM using the MicroWire protocol.
   13622  */
   13623 static int
   13624 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13625 {
   13626 	uint32_t reg, val;
   13627 	int i, rv;
   13628 
   13629 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13630 		device_xname(sc->sc_dev), __func__));
   13631 
   13632 	rv = sc->nvm.acquire(sc);
   13633 	if (rv != 0)
   13634 		return rv;
   13635 
   13636 	for (i = 0; i < wordcnt; i++) {
   13637 		/* Clear SK and DI. */
   13638 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13639 		CSR_WRITE(sc, WMREG_EECD, reg);
   13640 
   13641 		/*
   13642 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13643 		 * and Xen.
   13644 		 *
   13645 		 * We use this workaround only for 82540 because qemu's
   13646 		 * e1000 act as 82540.
   13647 		 */
   13648 		if (sc->sc_type == WM_T_82540) {
   13649 			reg |= EECD_SK;
   13650 			CSR_WRITE(sc, WMREG_EECD, reg);
   13651 			reg &= ~EECD_SK;
   13652 			CSR_WRITE(sc, WMREG_EECD, reg);
   13653 			CSR_WRITE_FLUSH(sc);
   13654 			delay(2);
   13655 		}
   13656 		/* XXX: end of workaround */
   13657 
   13658 		/* Set CHIP SELECT. */
   13659 		reg |= EECD_CS;
   13660 		CSR_WRITE(sc, WMREG_EECD, reg);
   13661 		CSR_WRITE_FLUSH(sc);
   13662 		delay(2);
   13663 
   13664 		/* Shift in the READ command. */
   13665 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13666 
   13667 		/* Shift in address. */
   13668 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13669 
   13670 		/* Shift out the data. */
   13671 		wm_eeprom_recvbits(sc, &val, 16);
   13672 		data[i] = val & 0xffff;
   13673 
   13674 		/* Clear CHIP SELECT. */
   13675 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13676 		CSR_WRITE(sc, WMREG_EECD, reg);
   13677 		CSR_WRITE_FLUSH(sc);
   13678 		delay(2);
   13679 	}
   13680 
   13681 	sc->nvm.release(sc);
   13682 	return 0;
   13683 }
   13684 
   13685 /* SPI */
   13686 
   13687 /*
   13688  * Set SPI and FLASH related information from the EECD register.
   13689  * For 82541 and 82547, the word size is taken from EEPROM.
   13690  */
   13691 static int
   13692 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13693 {
   13694 	int size;
   13695 	uint32_t reg;
   13696 	uint16_t data;
   13697 
   13698 	reg = CSR_READ(sc, WMREG_EECD);
   13699 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13700 
   13701 	/* Read the size of NVM from EECD by default */
   13702 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13703 	switch (sc->sc_type) {
   13704 	case WM_T_82541:
   13705 	case WM_T_82541_2:
   13706 	case WM_T_82547:
   13707 	case WM_T_82547_2:
   13708 		/* Set dummy value to access EEPROM */
   13709 		sc->sc_nvm_wordsize = 64;
   13710 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13711 			aprint_error_dev(sc->sc_dev,
   13712 			    "%s: failed to read EEPROM size\n", __func__);
   13713 		}
   13714 		reg = data;
   13715 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13716 		if (size == 0)
    13717 			size = 6; /* 64 words */
   13718 		else
   13719 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13720 		break;
   13721 	case WM_T_80003:
   13722 	case WM_T_82571:
   13723 	case WM_T_82572:
   13724 	case WM_T_82573: /* SPI case */
   13725 	case WM_T_82574: /* SPI case */
   13726 	case WM_T_82583: /* SPI case */
   13727 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13728 		if (size > 14)
   13729 			size = 14;
   13730 		break;
   13731 	case WM_T_82575:
   13732 	case WM_T_82576:
   13733 	case WM_T_82580:
   13734 	case WM_T_I350:
   13735 	case WM_T_I354:
   13736 	case WM_T_I210:
   13737 	case WM_T_I211:
   13738 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13739 		if (size > 15)
   13740 			size = 15;
   13741 		break;
   13742 	default:
   13743 		aprint_error_dev(sc->sc_dev,
   13744 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13745 		return -1;
   13746 		break;
   13747 	}
   13748 
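          	/* At this point "size" is log2 of the NVM word count. */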
   13749 	sc->sc_nvm_wordsize = 1 << size;
   13750 
   13751 	return 0;
   13752 }
   13753 
   13754 /*
   13755  * wm_nvm_ready_spi:
   13756  *
   13757  *	Wait for a SPI EEPROM to be ready for commands.
   13758  */
   13759 static int
   13760 wm_nvm_ready_spi(struct wm_softc *sc)
   13761 {
   13762 	uint32_t val;
   13763 	int usec;
   13764 
   13765 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13766 		device_xname(sc->sc_dev), __func__));
   13767 
   13768 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13769 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13770 		wm_eeprom_recvbits(sc, &val, 8);
   13771 		if ((val & SPI_SR_RDY) == 0)
   13772 			break;
   13773 	}
   13774 	if (usec >= SPI_MAX_RETRIES) {
    13775 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   13776 		return -1;
   13777 	}
   13778 	return 0;
   13779 }
   13780 
   13781 /*
   13782  * wm_nvm_read_spi:
   13783  *
    13784  *	Read a word from the EEPROM using the SPI protocol.
   13785  */
   13786 static int
   13787 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13788 {
   13789 	uint32_t reg, val;
   13790 	int i;
   13791 	uint8_t opc;
   13792 	int rv;
   13793 
   13794 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13795 		device_xname(sc->sc_dev), __func__));
   13796 
   13797 	rv = sc->nvm.acquire(sc);
   13798 	if (rv != 0)
   13799 		return rv;
   13800 
   13801 	/* Clear SK and CS. */
   13802 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13803 	CSR_WRITE(sc, WMREG_EECD, reg);
   13804 	CSR_WRITE_FLUSH(sc);
   13805 	delay(2);
   13806 
   13807 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13808 		goto out;
   13809 
   13810 	/* Toggle CS to flush commands. */
   13811 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13812 	CSR_WRITE_FLUSH(sc);
   13813 	delay(2);
   13814 	CSR_WRITE(sc, WMREG_EECD, reg);
   13815 	CSR_WRITE_FLUSH(sc);
   13816 	delay(2);
   13817 
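          	/*
          	 * SPI EEPROMs are byte addressed, hence the "word << 1"
          	 * below.  Parts with 8 address bits carry the ninth address
          	 * bit (A8) in the opcode itself.
          	 */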
   13818 	opc = SPI_OPC_READ;
   13819 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13820 		opc |= SPI_OPC_A8;
   13821 
   13822 	wm_eeprom_sendbits(sc, opc, 8);
   13823 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13824 
   13825 	for (i = 0; i < wordcnt; i++) {
   13826 		wm_eeprom_recvbits(sc, &val, 16);
   13827 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13828 	}
   13829 
   13830 	/* Raise CS and clear SK. */
   13831 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13832 	CSR_WRITE(sc, WMREG_EECD, reg);
   13833 	CSR_WRITE_FLUSH(sc);
   13834 	delay(2);
   13835 
   13836 out:
   13837 	sc->nvm.release(sc);
   13838 	return rv;
   13839 }
   13840 
   13841 /* Using with EERD */
   13842 
   13843 static int
   13844 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13845 {
   13846 	uint32_t attempts = 100000;
   13847 	uint32_t i, reg = 0;
   13848 	int32_t done = -1;
   13849 
   13850 	for (i = 0; i < attempts; i++) {
   13851 		reg = CSR_READ(sc, rw);
   13852 
   13853 		if (reg & EERD_DONE) {
   13854 			done = 0;
   13855 			break;
   13856 		}
   13857 		delay(5);
   13858 	}
   13859 
   13860 	return done;
   13861 }
   13862 
   13863 static int
   13864 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13865 {
   13866 	int i, eerd = 0;
   13867 	int rv;
   13868 
   13869 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13870 		device_xname(sc->sc_dev), __func__));
   13871 
   13872 	rv = sc->nvm.acquire(sc);
   13873 	if (rv != 0)
   13874 		return rv;
   13875 
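          	/*
          	 * For each word: start a read by writing the address with
          	 * EERD_START set, poll for EERD_DONE, then extract the data
          	 * from the upper bits of the register.
          	 */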
   13876 	for (i = 0; i < wordcnt; i++) {
   13877 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13878 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13879 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13880 		if (rv != 0) {
   13881 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13882 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13883 			break;
   13884 		}
   13885 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13886 	}
   13887 
   13888 	sc->nvm.release(sc);
   13889 	return rv;
   13890 }
   13891 
   13892 /* Flash */
   13893 
   13894 static int
   13895 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13896 {
   13897 	uint32_t eecd;
   13898 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13899 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13900 	uint32_t nvm_dword = 0;
   13901 	uint8_t sig_byte = 0;
   13902 	int rv;
   13903 
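          	/*
          	 * The valid-bank signature lives in the upper byte of NVM
          	 * word ICH_NVM_SIG_WORD; act_offset addresses that byte (or,
          	 * on SPT/CNP, the dword containing it, since those parts
          	 * only support 32-bit flash access).
          	 */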
   13904 	switch (sc->sc_type) {
   13905 	case WM_T_PCH_SPT:
   13906 	case WM_T_PCH_CNP:
   13907 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13908 		act_offset = ICH_NVM_SIG_WORD * 2;
   13909 
   13910 		/* Set bank to 0 in case flash read fails. */
   13911 		*bank = 0;
   13912 
   13913 		/* Check bank 0 */
   13914 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13915 		if (rv != 0)
   13916 			return rv;
   13917 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13918 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13919 			*bank = 0;
   13920 			return 0;
   13921 		}
   13922 
   13923 		/* Check bank 1 */
   13924 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13925 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13926 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13927 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13928 			*bank = 1;
   13929 			return 0;
   13930 		}
   13931 		aprint_error_dev(sc->sc_dev,
   13932 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13933 		return -1;
   13934 	case WM_T_ICH8:
   13935 	case WM_T_ICH9:
   13936 		eecd = CSR_READ(sc, WMREG_EECD);
   13937 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13938 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13939 			return 0;
   13940 		}
   13941 		/* FALLTHROUGH */
   13942 	default:
   13943 		/* Default to 0 */
   13944 		*bank = 0;
   13945 
   13946 		/* Check bank 0 */
   13947 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13948 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13949 			*bank = 0;
   13950 			return 0;
   13951 		}
   13952 
   13953 		/* Check bank 1 */
   13954 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13955 		    &sig_byte);
   13956 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13957 			*bank = 1;
   13958 			return 0;
   13959 		}
   13960 	}
   13961 
   13962 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13963 		device_xname(sc->sc_dev)));
   13964 	return -1;
   13965 }
   13966 
   13967 /******************************************************************************
   13968  * This function does initial flash setup so that a new read/write/erase cycle
   13969  * can be started.
   13970  *
    13971  * sc - The pointer to the softc structure
   13972  ****************************************************************************/
   13973 static int32_t
   13974 wm_ich8_cycle_init(struct wm_softc *sc)
   13975 {
   13976 	uint16_t hsfsts;
   13977 	int32_t error = 1;
   13978 	int32_t i     = 0;
   13979 
   13980 	if (sc->sc_type >= WM_T_PCH_SPT)
   13981 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13982 	else
   13983 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13984 
    13985 	/* Check the Flash Descriptor Valid bit in the HW status register */
   13986 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13987 		return error;
   13988 
   13989 	/* Clear FCERR in Hw status by writing 1 */
   13990 	/* Clear DAEL in Hw status by writing a 1 */
   13991 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13992 
   13993 	if (sc->sc_type >= WM_T_PCH_SPT)
   13994 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13995 	else
   13996 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13997 
    13998 	/*
    13999 	 * Either we should have a hardware SPI cycle-in-progress bit to
    14000 	 * check against in order to start a new cycle, or the FDONE bit
    14001 	 * should be changed in the hardware so that it is 1 after hardware
    14002 	 * reset, which could then be used to tell whether a cycle is in
    14003 	 * progress or has completed.  We should also have some software
    14004 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
    14005 	 * so that accesses from two threads are serialized, or some way
    14006 	 * to keep two threads from starting a cycle at the same time.
    14007 	 */
   14008 
   14009 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14010 		/*
   14011 		 * There is no cycle running at present, so we can start a
   14012 		 * cycle
   14013 		 */
   14014 
   14015 		/* Begin by setting Flash Cycle Done. */
   14016 		hsfsts |= HSFSTS_DONE;
   14017 		if (sc->sc_type >= WM_T_PCH_SPT)
   14018 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14019 			    hsfsts & 0xffffUL);
   14020 		else
   14021 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14022 		error = 0;
   14023 	} else {
   14024 		/*
   14025 		 * Otherwise poll for sometime so the current cycle has a
   14026 		 * chance to end before giving up.
   14027 		 */
   14028 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14029 			if (sc->sc_type >= WM_T_PCH_SPT)
   14030 				hsfsts = ICH8_FLASH_READ32(sc,
   14031 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14032 			else
   14033 				hsfsts = ICH8_FLASH_READ16(sc,
   14034 				    ICH_FLASH_HSFSTS);
   14035 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14036 				error = 0;
   14037 				break;
   14038 			}
   14039 			delay(1);
   14040 		}
   14041 		if (error == 0) {
   14042 			/*
    14043 			 * The previous cycle finished before the timeout;
    14044 			 * now set the Flash Cycle Done.
   14045 			 */
   14046 			hsfsts |= HSFSTS_DONE;
   14047 			if (sc->sc_type >= WM_T_PCH_SPT)
   14048 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14049 				    hsfsts & 0xffffUL);
   14050 			else
   14051 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14052 				    hsfsts);
   14053 		}
   14054 	}
   14055 	return error;
   14056 }
   14057 
   14058 /******************************************************************************
   14059  * This function starts a flash cycle and waits for its completion
   14060  *
    14061  * sc - The pointer to the softc structure
   14062  ****************************************************************************/
   14063 static int32_t
   14064 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14065 {
   14066 	uint16_t hsflctl;
   14067 	uint16_t hsfsts;
   14068 	int32_t error = 1;
   14069 	uint32_t i = 0;
   14070 
   14071 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14072 	if (sc->sc_type >= WM_T_PCH_SPT)
   14073 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14074 	else
   14075 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14076 	hsflctl |= HSFCTL_GO;
   14077 	if (sc->sc_type >= WM_T_PCH_SPT)
   14078 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14079 		    (uint32_t)hsflctl << 16);
   14080 	else
   14081 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14082 
    14083 	/* Wait until the FDONE bit is set to 1 */
   14084 	do {
   14085 		if (sc->sc_type >= WM_T_PCH_SPT)
   14086 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14087 			    & 0xffffUL;
   14088 		else
   14089 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14090 		if (hsfsts & HSFSTS_DONE)
   14091 			break;
   14092 		delay(1);
   14093 		i++;
   14094 	} while (i < timeout);
    14095 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14096 		error = 0;
   14097 
   14098 	return error;
   14099 }
   14100 
   14101 /******************************************************************************
   14102  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14103  *
    14104  * sc - The pointer to the softc structure
   14105  * index - The index of the byte or word to read.
   14106  * size - Size of data to read, 1=byte 2=word, 4=dword
   14107  * data - Pointer to the word to store the value read.
   14108  *****************************************************************************/
   14109 static int32_t
   14110 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14111     uint32_t size, uint32_t *data)
   14112 {
   14113 	uint16_t hsfsts;
   14114 	uint16_t hsflctl;
   14115 	uint32_t flash_linear_address;
   14116 	uint32_t flash_data = 0;
   14117 	int32_t error = 1;
   14118 	int32_t count = 0;
   14119 
    14120 	if (size < 1 || size > 4 || data == NULL ||
   14121 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14122 		return error;
   14123 
   14124 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14125 	    sc->sc_ich8_flash_base;
   14126 
   14127 	do {
   14128 		delay(1);
   14129 		/* Steps */
   14130 		error = wm_ich8_cycle_init(sc);
   14131 		if (error)
   14132 			break;
   14133 
   14134 		if (sc->sc_type >= WM_T_PCH_SPT)
   14135 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14136 			    >> 16;
   14137 		else
   14138 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14139 		/* The bcount field encodes the transfer size minus one. */
   14140 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14141 		    & HSFCTL_BCOUNT_MASK;
   14142 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14143 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14144 			/*
    14145 			 * In SPT, this register is in LAN memory space, not
    14146 			 * flash. Therefore, only 32-bit access is supported.
   14147 			 */
   14148 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14149 			    (uint32_t)hsflctl << 16);
   14150 		} else
   14151 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14152 
   14153 		/*
   14154 		 * Write the last 24 bits of index into Flash Linear address
   14155 		 * field in Flash Address
   14156 		 */
   14157 		/* TODO: TBD maybe check the index against the size of flash */
   14158 
   14159 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14160 
   14161 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14162 
   14163 		/*
    14164 		 * If FCERR is set, clear it and retry the whole sequence
    14165 		 * a few more times; otherwise read in the result from
    14166 		 * Flash Data0, which returns the data least significant
    14167 		 * byte first.
   14168 		 */
   14169 		if (error == 0) {
   14170 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14171 			if (size == 1)
   14172 				*data = (uint8_t)(flash_data & 0x000000FF);
   14173 			else if (size == 2)
   14174 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14175 			else if (size == 4)
   14176 				*data = (uint32_t)flash_data;
   14177 			break;
   14178 		} else {
   14179 			/*
   14180 			 * If we've gotten here, then things are probably
   14181 			 * completely hosed, but if the error condition is
   14182 			 * detected, it won't hurt to give it another try...
   14183 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14184 			 */
   14185 			if (sc->sc_type >= WM_T_PCH_SPT)
   14186 				hsfsts = ICH8_FLASH_READ32(sc,
   14187 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14188 			else
   14189 				hsfsts = ICH8_FLASH_READ16(sc,
   14190 				    ICH_FLASH_HSFSTS);
   14191 
   14192 			if (hsfsts & HSFSTS_ERR) {
   14193 				/* Repeat for some time before giving up. */
   14194 				continue;
   14195 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14196 				break;
   14197 		}
   14198 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14199 
   14200 	return error;
   14201 }
   14202 
   14203 /******************************************************************************
   14204  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14205  *
    14206  * sc - Pointer to the wm_softc structure
   14207  * index - The index of the byte to read.
   14208  * data - Pointer to a byte to store the value read.
   14209  *****************************************************************************/
   14210 static int32_t
    14211 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
   14212 {
   14213 	int32_t status;
   14214 	uint32_t word = 0;
   14215 
   14216 	status = wm_read_ich8_data(sc, index, 1, &word);
   14217 	if (status == 0)
   14218 		*data = (uint8_t)word;
   14219 	else
   14220 		*data = 0;
   14221 
   14222 	return status;
   14223 }
   14224 
   14225 /******************************************************************************
   14226  * Reads a word from the NVM using the ICH8 flash access registers.
   14227  *
    14228  * sc - Pointer to the wm_softc structure
   14229  * index - The starting byte index of the word to read.
   14230  * data - Pointer to a word to store the value read.
   14231  *****************************************************************************/
   14232 static int32_t
   14233 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14234 {
   14235 	int32_t status;
   14236 	uint32_t word = 0;
   14237 
   14238 	status = wm_read_ich8_data(sc, index, 2, &word);
   14239 	if (status == 0)
   14240 		*data = (uint16_t)word;
   14241 	else
   14242 		*data = 0;
   14243 
   14244 	return status;
   14245 }
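          /*
           * Usage sketch (illustrative, not part of the driver): callers
           * pass a byte index, so reading NVM word N of the active bank
           * looks like:
           *
           *	uint16_t w;
           *	if (wm_read_ich8_word(sc, bank_offset + N * 2, &w) == 0)
           *		(use w);
           */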
   14246 
   14247 /******************************************************************************
   14248  * Reads a dword from the NVM using the ICH8 flash access registers.
   14249  *
    14250  * sc - Pointer to the wm_softc structure
    14251  * index - The starting byte index of the dword to read.
    14252  * data - Pointer to a dword to store the value read.
   14253  *****************************************************************************/
   14254 static int32_t
   14255 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14256 {
   14257 	int32_t status;
   14258 
   14259 	status = wm_read_ich8_data(sc, index, 4, data);
   14260 	return status;
   14261 }
   14262 
   14263 /******************************************************************************
   14264  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14265  * register.
   14266  *
   14267  * sc - Struct containing variables accessed by shared code
   14268  * offset - offset of word in the EEPROM to read
   14269  * data - word read from the EEPROM
   14270  * words - number of words to read
   14271  *****************************************************************************/
   14272 static int
   14273 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14274 {
   14275 	int rv;
   14276 	uint32_t flash_bank = 0;
   14277 	uint32_t act_offset = 0;
   14278 	uint32_t bank_offset = 0;
   14279 	uint16_t word = 0;
   14280 	uint16_t i = 0;
   14281 
   14282 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14283 		device_xname(sc->sc_dev), __func__));
   14284 
   14285 	rv = sc->nvm.acquire(sc);
   14286 	if (rv != 0)
   14287 		return rv;
   14288 
   14289 	/*
    14290 	 * We need to know which flash bank is valid.  In the event
    14291 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    14292 	 * managing flash_bank, so it cannot be trusted and needs
    14293 	 * to be updated with each read.
   14294 	 */
   14295 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14296 	if (rv) {
   14297 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14298 			device_xname(sc->sc_dev)));
   14299 		flash_bank = 0;
   14300 	}
   14301 
   14302 	/*
    14303 	 * Adjust the offset if we're on bank 1; the bank size is in
    14304 	 * words, hence the * 2 for a byte offset.
   14305 	 */
   14306 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
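          	/*
          	 * Example (illustrative): with a bank size of 0x800 words,
          	 * bank 1 starts at byte offset 0x800 * 2 = 0x1000, so NVM
          	 * word 0x10 of bank 1 is read from flash byte offset
          	 * 0x1000 + 0x10 * 2 = 0x1020.
          	 */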
   14307 
   14308 	for (i = 0; i < words; i++) {
   14309 		/* The NVM part needs a byte offset, hence * 2 */
   14310 		act_offset = bank_offset + ((offset + i) * 2);
   14311 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14312 		if (rv) {
   14313 			aprint_error_dev(sc->sc_dev,
   14314 			    "%s: failed to read NVM\n", __func__);
   14315 			break;
   14316 		}
   14317 		data[i] = word;
   14318 	}
   14319 
   14320 	sc->nvm.release(sc);
   14321 	return rv;
   14322 }
   14323 
   14324 /******************************************************************************
   14325  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14326  * register.
   14327  *
   14328  * sc - Struct containing variables accessed by shared code
   14329  * offset - offset of word in the EEPROM to read
   14330  * data - word read from the EEPROM
   14331  * words - number of words to read
   14332  *****************************************************************************/
   14333 static int
   14334 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14335 {
   14336 	int	 rv;
   14337 	uint32_t flash_bank = 0;
   14338 	uint32_t act_offset = 0;
   14339 	uint32_t bank_offset = 0;
   14340 	uint32_t dword = 0;
   14341 	uint16_t i = 0;
   14342 
   14343 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14344 		device_xname(sc->sc_dev), __func__));
   14345 
   14346 	rv = sc->nvm.acquire(sc);
   14347 	if (rv != 0)
   14348 		return rv;
   14349 
   14350 	/*
    14351 	 * We need to know which flash bank is valid.  In the event
    14352 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    14353 	 * managing flash_bank, so it cannot be trusted and needs
    14354 	 * to be updated with each read.
   14355 	 */
   14356 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14357 	if (rv) {
   14358 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14359 			device_xname(sc->sc_dev)));
   14360 		flash_bank = 0;
   14361 	}
   14362 
   14363 	/*
    14364 	 * Adjust the offset if we're on bank 1; the bank size is in
    14365 	 * words, hence the * 2 for a byte offset.
   14366 	 */
   14367 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14368 
   14369 	for (i = 0; i < words; i++) {
   14370 		/* The NVM part needs a byte offset, hence * 2 */
   14371 		act_offset = bank_offset + ((offset + i) * 2);
   14372 		/* but we must read dword aligned, so mask ... */
   14373 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14374 		if (rv) {
   14375 			aprint_error_dev(sc->sc_dev,
   14376 			    "%s: failed to read NVM\n", __func__);
   14377 			break;
   14378 		}
   14379 		/* ... and pick out low or high word */
   14380 		if ((act_offset & 0x2) == 0)
   14381 			data[i] = (uint16_t)(dword & 0xFFFF);
   14382 		else
   14383 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14384 	}
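          	/*
          	 * Example (illustrative): for act_offset == 0x1022 the
          	 * dword at byte offset 0x1020 is fetched and, since bit 1
          	 * of the offset is set, the high word (bits 31:16) is
          	 * returned; act_offset == 0x1020 would yield the low word.
          	 */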
   14385 
   14386 	sc->nvm.release(sc);
   14387 	return rv;
   14388 }
   14389 
   14390 /* iNVM */
   14391 
   14392 static int
   14393 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14394 {
    14395 	int32_t	 rv = -1;	/* Assume failure until the word is found */
   14396 	uint32_t invm_dword;
   14397 	uint16_t i;
   14398 	uint8_t record_type, word_address;
   14399 
   14400 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14401 		device_xname(sc->sc_dev), __func__));
   14402 
   14403 	for (i = 0; i < INVM_SIZE; i++) {
   14404 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14405 		/* Get record type */
   14406 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14407 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14408 			break;
   14409 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14410 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14411 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14412 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14413 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14414 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14415 			if (word_address == address) {
   14416 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14417 				rv = 0;
   14418 				break;
   14419 			}
   14420 		}
   14421 	}
   14422 
   14423 	return rv;
   14424 }
   14425 
   14426 static int
   14427 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14428 {
   14429 	int i, rv;
   14430 
   14431 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14432 		device_xname(sc->sc_dev), __func__));
   14433 
   14434 	rv = sc->nvm.acquire(sc);
   14435 	if (rv != 0)
   14436 		return rv;
   14437 
   14438 	for (i = 0; i < words; i++) {
   14439 		switch (offset + i) {
   14440 		case NVM_OFF_MACADDR:
   14441 		case NVM_OFF_MACADDR1:
   14442 		case NVM_OFF_MACADDR2:
   14443 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14444 			if (rv != 0) {
   14445 				data[i] = 0xffff;
   14446 				rv = -1;
   14447 			}
   14448 			break;
   14449 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14450 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14451 			if (rv != 0) {
   14452 				*data = INVM_DEFAULT_AL;
   14453 				rv = 0;
   14454 			}
   14455 			break;
   14456 		case NVM_OFF_CFG2:
   14457 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14458 			if (rv != 0) {
   14459 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14460 				rv = 0;
   14461 			}
   14462 			break;
   14463 		case NVM_OFF_CFG4:
   14464 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14465 			if (rv != 0) {
   14466 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14467 				rv = 0;
   14468 			}
   14469 			break;
   14470 		case NVM_OFF_LED_1_CFG:
   14471 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14472 			if (rv != 0) {
   14473 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14474 				rv = 0;
   14475 			}
   14476 			break;
   14477 		case NVM_OFF_LED_0_2_CFG:
   14478 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14479 			if (rv != 0) {
   14480 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14481 				rv = 0;
   14482 			}
   14483 			break;
   14484 		case NVM_OFF_ID_LED_SETTINGS:
   14485 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14486 			if (rv != 0) {
   14487 				*data = ID_LED_RESERVED_FFFF;
   14488 				rv = 0;
   14489 			}
   14490 			break;
   14491 		default:
   14492 			DPRINTF(sc, WM_DEBUG_NVM,
   14493 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14494 			*data = NVM_RESERVED_WORD;
   14495 			break;
   14496 		}
   14497 	}
   14498 
   14499 	sc->nvm.release(sc);
   14500 	return rv;
   14501 }
   14502 
    14503 /* Locking, NVM type detection, checksum validation, version check and read */
   14504 
   14505 static int
   14506 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14507 {
   14508 	uint32_t eecd = 0;
   14509 
   14510 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14511 	    || sc->sc_type == WM_T_82583) {
   14512 		eecd = CSR_READ(sc, WMREG_EECD);
   14513 
   14514 		/* Isolate bits 15 & 16 */
   14515 		eecd = ((eecd >> 15) & 0x03);
   14516 
   14517 		/* If both bits are set, device is Flash type */
   14518 		if (eecd == 0x03)
   14519 			return 0;
   14520 	}
   14521 	return 1;
   14522 }
   14523 
   14524 static int
   14525 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14526 {
   14527 	uint32_t eec;
   14528 
   14529 	eec = CSR_READ(sc, WMREG_EEC);
   14530 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14531 		return 1;
   14532 
   14533 	return 0;
   14534 }
   14535 
   14536 /*
   14537  * wm_nvm_validate_checksum
   14538  *
   14539  * The checksum is defined as the sum of the first 64 (16 bit) words.
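           * The checksum word itself is included in the sum; an image is
           * considered valid when that 16-bit sum equals NVM_CHECKSUM
           * (0xBABA on Intel parts).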
   14540  */
   14541 static int
   14542 wm_nvm_validate_checksum(struct wm_softc *sc)
   14543 {
   14544 	uint16_t checksum;
   14545 	uint16_t eeprom_data;
   14546 #ifdef WM_DEBUG
   14547 	uint16_t csum_wordaddr, valid_checksum;
   14548 #endif
   14549 	int i;
   14550 
   14551 	checksum = 0;
   14552 
   14553 	/* Don't check for I211 */
   14554 	if (sc->sc_type == WM_T_I211)
   14555 		return 0;
   14556 
   14557 #ifdef WM_DEBUG
   14558 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14559 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14560 		csum_wordaddr = NVM_OFF_COMPAT;
   14561 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14562 	} else {
   14563 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14564 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14565 	}
   14566 
   14567 	/* Dump EEPROM image for debug */
   14568 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14569 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14570 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14571 		/* XXX PCH_SPT? */
   14572 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14573 		if ((eeprom_data & valid_checksum) == 0)
   14574 			DPRINTF(sc, WM_DEBUG_NVM,
   14575 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14576 				device_xname(sc->sc_dev), eeprom_data,
   14577 				    valid_checksum));
   14578 	}
   14579 
   14580 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14581 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14582 		for (i = 0; i < NVM_SIZE; i++) {
   14583 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14584 				printf("XXXX ");
   14585 			else
   14586 				printf("%04hx ", eeprom_data);
   14587 			if (i % 8 == 7)
   14588 				printf("\n");
   14589 		}
   14590 	}
   14591 
   14592 #endif /* WM_DEBUG */
   14593 
   14594 	for (i = 0; i < NVM_SIZE; i++) {
   14595 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14596 			return -1;
   14597 		checksum += eeprom_data;
   14598 	}
   14599 
   14600 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14601 #ifdef WM_DEBUG
   14602 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14603 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14604 #endif
   14605 	}
   14606 
   14607 	return 0;
   14608 }
   14609 
   14610 static void
   14611 wm_nvm_version_invm(struct wm_softc *sc)
   14612 {
   14613 	uint32_t dword;
   14614 
   14615 	/*
   14616 	 * Linux's code to decode version is very strange, so we don't
   14617 	 * obey that algorithm and just use word 61 as the document.
   14618 	 * Perhaps it's not perfect though...
   14619 	 *
   14620 	 * Example:
   14621 	 *
   14622 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14623 	 */
   14624 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14625 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14626 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14627 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14628 }
   14629 
   14630 static void
   14631 wm_nvm_version(struct wm_softc *sc)
   14632 {
   14633 	uint16_t major, minor, build, patch;
   14634 	uint16_t uid0, uid1;
   14635 	uint16_t nvm_data;
   14636 	uint16_t off;
   14637 	bool check_version = false;
   14638 	bool check_optionrom = false;
   14639 	bool have_build = false;
   14640 	bool have_uid = true;
   14641 
   14642 	/*
   14643 	 * Version format:
   14644 	 *
   14645 	 * XYYZ
   14646 	 * X0YZ
   14647 	 * X0YY
   14648 	 *
   14649 	 * Example:
   14650 	 *
   14651 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14652 	 *	82571	0x50a6	5.10.6?
   14653 	 *	82572	0x506a	5.6.10?
   14654 	 *	82572EI	0x5069	5.6.9?
   14655 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14656 	 *		0x2013	2.1.3?
   14657 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14658 	 * ICH8+82567	0x0040	0.4.0?
   14659 	 * ICH9+82566	0x1040	1.4.0?
   14660 	 *ICH10+82567	0x0043	0.4.3?
   14661 	 *  PCH+82577	0x00c1	0.12.1?
   14662 	 * PCH2+82579	0x00d3	0.13.3?
   14663 	 *		0x00d4	0.13.4?
   14664 	 *  LPT+I218	0x0023	0.2.3?
   14665 	 *  SPT+I219	0x0084	0.8.4?
   14666 	 *  CNP+I219	0x0054	0.5.4?
   14667 	 */
   14668 
   14669 	/*
   14670 	 * XXX
   14671 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14672 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14673 	 */
   14674 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14675 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14676 		have_uid = false;
   14677 
   14678 	switch (sc->sc_type) {
   14679 	case WM_T_82571:
   14680 	case WM_T_82572:
   14681 	case WM_T_82574:
   14682 	case WM_T_82583:
   14683 		check_version = true;
   14684 		check_optionrom = true;
   14685 		have_build = true;
   14686 		break;
   14687 	case WM_T_ICH8:
   14688 	case WM_T_ICH9:
   14689 	case WM_T_ICH10:
   14690 	case WM_T_PCH:
   14691 	case WM_T_PCH2:
   14692 	case WM_T_PCH_LPT:
   14693 	case WM_T_PCH_SPT:
   14694 	case WM_T_PCH_CNP:
   14695 		check_version = true;
   14696 		have_build = true;
   14697 		have_uid = false;
   14698 		break;
   14699 	case WM_T_82575:
   14700 	case WM_T_82576:
   14701 	case WM_T_82580:
   14702 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14703 			check_version = true;
   14704 		break;
   14705 	case WM_T_I211:
   14706 		wm_nvm_version_invm(sc);
   14707 		have_uid = false;
   14708 		goto printver;
   14709 	case WM_T_I210:
   14710 		if (!wm_nvm_flash_presence_i210(sc)) {
   14711 			wm_nvm_version_invm(sc);
   14712 			have_uid = false;
   14713 			goto printver;
   14714 		}
   14715 		/* FALLTHROUGH */
   14716 	case WM_T_I350:
   14717 	case WM_T_I354:
   14718 		check_version = true;
   14719 		check_optionrom = true;
   14720 		break;
   14721 	default:
   14722 		return;
   14723 	}
   14724 	if (check_version
   14725 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14726 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14727 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14728 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14729 			build = nvm_data & NVM_BUILD_MASK;
   14730 			have_build = true;
   14731 		} else
   14732 			minor = nvm_data & 0x00ff;
   14733 
   14734 		/* Decimal */
   14735 		minor = (minor / 16) * 10 + (minor % 16);
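          		/*
          		 * Example (illustrative): nvm_data 0x50a2 on an 82571
          		 * decodes as major 5, minor 0x0a -> 10 decimal,
          		 * build 2, i.e. version "5.10.2".
          		 */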
   14736 		sc->sc_nvm_ver_major = major;
   14737 		sc->sc_nvm_ver_minor = minor;
   14738 
   14739 printver:
   14740 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14741 		    sc->sc_nvm_ver_minor);
   14742 		if (have_build) {
   14743 			sc->sc_nvm_ver_build = build;
   14744 			aprint_verbose(".%d", build);
   14745 		}
   14746 	}
   14747 
    14748 	/* Assume the Option ROM area is above NVM_SIZE */
   14749 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14750 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14751 		/* Option ROM Version */
   14752 		if ((off != 0x0000) && (off != 0xffff)) {
   14753 			int rv;
   14754 
   14755 			off += NVM_COMBO_VER_OFF;
   14756 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14757 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14758 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14759 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14760 				/* 16bits */
   14761 				major = uid0 >> 8;
   14762 				build = (uid0 << 8) | (uid1 >> 8);
   14763 				patch = uid1 & 0x00ff;
   14764 				aprint_verbose(", option ROM Version %d.%d.%d",
   14765 				    major, build, patch);
   14766 			}
   14767 		}
   14768 	}
   14769 
   14770 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14771 		aprint_verbose(", Image Unique ID %08x",
   14772 		    ((uint32_t)uid1 << 16) | uid0);
   14773 }
   14774 
   14775 /*
   14776  * wm_nvm_read:
   14777  *
   14778  *	Read data from the serial EEPROM.
   14779  */
   14780 static int
   14781 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14782 {
   14783 	int rv;
   14784 
   14785 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14786 		device_xname(sc->sc_dev), __func__));
   14787 
   14788 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14789 		return -1;
   14790 
   14791 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14792 
   14793 	return rv;
   14794 }
   14795 
   14796 /*
   14797  * Hardware semaphores.
    14798  * Very complex...
   14799  */
   14800 
   14801 static int
   14802 wm_get_null(struct wm_softc *sc)
   14803 {
   14804 
   14805 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14806 		device_xname(sc->sc_dev), __func__));
   14807 	return 0;
   14808 }
   14809 
   14810 static void
   14811 wm_put_null(struct wm_softc *sc)
   14812 {
   14813 
   14814 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14815 		device_xname(sc->sc_dev), __func__));
   14816 	return;
   14817 }
   14818 
   14819 static int
   14820 wm_get_eecd(struct wm_softc *sc)
   14821 {
   14822 	uint32_t reg;
   14823 	int x;
   14824 
   14825 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14826 		device_xname(sc->sc_dev), __func__));
   14827 
   14828 	reg = CSR_READ(sc, WMREG_EECD);
   14829 
   14830 	/* Request EEPROM access. */
   14831 	reg |= EECD_EE_REQ;
   14832 	CSR_WRITE(sc, WMREG_EECD, reg);
   14833 
    14834 	/* ... and wait for it to be granted. */
   14835 	for (x = 0; x < 1000; x++) {
   14836 		reg = CSR_READ(sc, WMREG_EECD);
   14837 		if (reg & EECD_EE_GNT)
   14838 			break;
   14839 		delay(5);
   14840 	}
   14841 	if ((reg & EECD_EE_GNT) == 0) {
   14842 		aprint_error_dev(sc->sc_dev,
   14843 		    "could not acquire EEPROM GNT\n");
   14844 		reg &= ~EECD_EE_REQ;
   14845 		CSR_WRITE(sc, WMREG_EECD, reg);
   14846 		return -1;
   14847 	}
   14848 
   14849 	return 0;
   14850 }
   14851 
   14852 static void
   14853 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14854 {
   14855 
   14856 	*eecd |= EECD_SK;
   14857 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14858 	CSR_WRITE_FLUSH(sc);
   14859 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14860 		delay(1);
   14861 	else
   14862 		delay(50);
   14863 }
   14864 
   14865 static void
   14866 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14867 {
   14868 
   14869 	*eecd &= ~EECD_SK;
   14870 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14871 	CSR_WRITE_FLUSH(sc);
   14872 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14873 		delay(1);
   14874 	else
   14875 		delay(50);
   14876 }
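          /*
           * Illustrative note on the delays above: SPI EEPROMs tolerate a
           * short (1us) clock half-period, while Microwire parts must be
           * clocked much more slowly (50us per half-period).
           */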
   14877 
   14878 static void
   14879 wm_put_eecd(struct wm_softc *sc)
   14880 {
   14881 	uint32_t reg;
   14882 
   14883 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14884 		device_xname(sc->sc_dev), __func__));
   14885 
   14886 	/* Stop nvm */
   14887 	reg = CSR_READ(sc, WMREG_EECD);
   14888 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14889 		/* Pull CS high */
   14890 		reg |= EECD_CS;
   14891 		wm_nvm_eec_clock_lower(sc, &reg);
   14892 	} else {
   14893 		/* CS on Microwire is active-high */
   14894 		reg &= ~(EECD_CS | EECD_DI);
   14895 		CSR_WRITE(sc, WMREG_EECD, reg);
   14896 		wm_nvm_eec_clock_raise(sc, &reg);
   14897 		wm_nvm_eec_clock_lower(sc, &reg);
   14898 	}
   14899 
   14900 	reg = CSR_READ(sc, WMREG_EECD);
   14901 	reg &= ~EECD_EE_REQ;
   14902 	CSR_WRITE(sc, WMREG_EECD, reg);
   14903 
   14904 	return;
   14905 }
   14906 
   14907 /*
   14908  * Get hardware semaphore.
   14909  * Same as e1000_get_hw_semaphore_generic()
   14910  */
   14911 static int
   14912 wm_get_swsm_semaphore(struct wm_softc *sc)
   14913 {
   14914 	int32_t timeout;
   14915 	uint32_t swsm;
   14916 
   14917 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14918 		device_xname(sc->sc_dev), __func__));
   14919 	KASSERT(sc->sc_nvm_wordsize > 0);
   14920 
   14921 retry:
   14922 	/* Get the SW semaphore. */
   14923 	timeout = sc->sc_nvm_wordsize + 1;
   14924 	while (timeout) {
   14925 		swsm = CSR_READ(sc, WMREG_SWSM);
   14926 
   14927 		if ((swsm & SWSM_SMBI) == 0)
   14928 			break;
   14929 
   14930 		delay(50);
   14931 		timeout--;
   14932 	}
   14933 
   14934 	if (timeout == 0) {
   14935 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14936 			/*
   14937 			 * In rare circumstances, the SW semaphore may already
   14938 			 * be held unintentionally. Clear the semaphore once
   14939 			 * before giving up.
   14940 			 */
   14941 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14942 			wm_put_swsm_semaphore(sc);
   14943 			goto retry;
   14944 		}
   14945 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14946 		return -1;
   14947 	}
   14948 
   14949 	/* Get the FW semaphore. */
   14950 	timeout = sc->sc_nvm_wordsize + 1;
   14951 	while (timeout) {
   14952 		swsm = CSR_READ(sc, WMREG_SWSM);
   14953 		swsm |= SWSM_SWESMBI;
   14954 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14955 		/* If we managed to set the bit we got the semaphore. */
   14956 		swsm = CSR_READ(sc, WMREG_SWSM);
   14957 		if (swsm & SWSM_SWESMBI)
   14958 			break;
   14959 
   14960 		delay(50);
   14961 		timeout--;
   14962 	}
   14963 
   14964 	if (timeout == 0) {
   14965 		aprint_error_dev(sc->sc_dev,
   14966 		    "could not acquire SWSM SWESMBI\n");
   14967 		/* Release semaphores */
   14968 		wm_put_swsm_semaphore(sc);
   14969 		return -1;
   14970 	}
   14971 	return 0;
   14972 }
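          /*
           * Illustrative summary of the protocol above: SMBI arbitrates
           * between software agents, and SWESMBI then arbitrates between
           * software and firmware.  Only after both bits are owned may the
           * shared resource (e.g. the NVM) be touched;
           * wm_put_swsm_semaphore() releases both bits at once.
           */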
   14973 
   14974 /*
   14975  * Put hardware semaphore.
   14976  * Same as e1000_put_hw_semaphore_generic()
   14977  */
   14978 static void
   14979 wm_put_swsm_semaphore(struct wm_softc *sc)
   14980 {
   14981 	uint32_t swsm;
   14982 
   14983 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14984 		device_xname(sc->sc_dev), __func__));
   14985 
   14986 	swsm = CSR_READ(sc, WMREG_SWSM);
   14987 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14988 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14989 }
   14990 
   14991 /*
   14992  * Get SW/FW semaphore.
   14993  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14994  */
   14995 static int
   14996 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14997 {
   14998 	uint32_t swfw_sync;
   14999 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15000 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15001 	int timeout;
   15002 
   15003 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15004 		device_xname(sc->sc_dev), __func__));
   15005 
   15006 	if (sc->sc_type == WM_T_80003)
   15007 		timeout = 50;
   15008 	else
   15009 		timeout = 200;
   15010 
   15011 	while (timeout) {
   15012 		if (wm_get_swsm_semaphore(sc)) {
   15013 			aprint_error_dev(sc->sc_dev,
   15014 			    "%s: failed to get semaphore\n",
   15015 			    __func__);
   15016 			return -1;
   15017 		}
   15018 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15019 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15020 			swfw_sync |= swmask;
   15021 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15022 			wm_put_swsm_semaphore(sc);
   15023 			return 0;
   15024 		}
   15025 		wm_put_swsm_semaphore(sc);
   15026 		delay(5000);
   15027 		timeout--;
   15028 	}
   15029 	device_printf(sc->sc_dev,
   15030 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15031 	    mask, swfw_sync);
   15032 	return -1;
   15033 }
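          /*
           * Illustrative note: SW_FW_SYNC holds a software claim bit
           * (mask << SWFW_SOFT_SHIFT) and a firmware claim bit
           * (mask << SWFW_FIRM_SHIFT) per resource.  The loop above only
           * takes the resource while both bits are clear, and
           * wm_put_swfw_semaphore() clears the software bit again.
           */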
   15034 
   15035 static void
   15036 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15037 {
   15038 	uint32_t swfw_sync;
   15039 
   15040 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15041 		device_xname(sc->sc_dev), __func__));
   15042 
   15043 	while (wm_get_swsm_semaphore(sc) != 0)
   15044 		continue;
   15045 
   15046 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15047 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15048 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15049 
   15050 	wm_put_swsm_semaphore(sc);
   15051 }
   15052 
   15053 static int
   15054 wm_get_nvm_80003(struct wm_softc *sc)
   15055 {
   15056 	int rv;
   15057 
   15058 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15059 		device_xname(sc->sc_dev), __func__));
   15060 
   15061 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15062 		aprint_error_dev(sc->sc_dev,
   15063 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15064 		return rv;
   15065 	}
   15066 
   15067 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15068 	    && (rv = wm_get_eecd(sc)) != 0) {
   15069 		aprint_error_dev(sc->sc_dev,
   15070 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15071 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15072 		return rv;
   15073 	}
   15074 
   15075 	return 0;
   15076 }
   15077 
   15078 static void
   15079 wm_put_nvm_80003(struct wm_softc *sc)
   15080 {
   15081 
   15082 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15083 		device_xname(sc->sc_dev), __func__));
   15084 
   15085 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15086 		wm_put_eecd(sc);
   15087 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15088 }
   15089 
   15090 static int
   15091 wm_get_nvm_82571(struct wm_softc *sc)
   15092 {
   15093 	int rv;
   15094 
   15095 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15096 		device_xname(sc->sc_dev), __func__));
   15097 
   15098 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15099 		return rv;
   15100 
   15101 	switch (sc->sc_type) {
   15102 	case WM_T_82573:
   15103 		break;
   15104 	default:
   15105 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15106 			rv = wm_get_eecd(sc);
   15107 		break;
   15108 	}
   15109 
   15110 	if (rv != 0) {
   15111 		aprint_error_dev(sc->sc_dev,
   15112 		    "%s: failed to get semaphore\n",
   15113 		    __func__);
   15114 		wm_put_swsm_semaphore(sc);
   15115 	}
   15116 
   15117 	return rv;
   15118 }
   15119 
   15120 static void
   15121 wm_put_nvm_82571(struct wm_softc *sc)
   15122 {
   15123 
   15124 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15125 		device_xname(sc->sc_dev), __func__));
   15126 
   15127 	switch (sc->sc_type) {
   15128 	case WM_T_82573:
   15129 		break;
   15130 	default:
   15131 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15132 			wm_put_eecd(sc);
   15133 		break;
   15134 	}
   15135 
   15136 	wm_put_swsm_semaphore(sc);
   15137 }
   15138 
   15139 static int
   15140 wm_get_phy_82575(struct wm_softc *sc)
   15141 {
   15142 
   15143 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15144 		device_xname(sc->sc_dev), __func__));
   15145 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15146 }
   15147 
   15148 static void
   15149 wm_put_phy_82575(struct wm_softc *sc)
   15150 {
   15151 
   15152 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15153 		device_xname(sc->sc_dev), __func__));
   15154 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15155 }
   15156 
   15157 static int
   15158 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15159 {
   15160 	uint32_t ext_ctrl;
   15161 	int timeout = 200;
   15162 
   15163 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15164 		device_xname(sc->sc_dev), __func__));
   15165 
   15166 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15167 	for (timeout = 0; timeout < 200; timeout++) {
   15168 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15169 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15170 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15171 
   15172 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15173 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15174 			return 0;
   15175 		delay(5000);
   15176 	}
   15177 	device_printf(sc->sc_dev,
   15178 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15179 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15180 	return -1;
   15181 }
   15182 
   15183 static void
   15184 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15185 {
   15186 	uint32_t ext_ctrl;
   15187 
   15188 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15189 		device_xname(sc->sc_dev), __func__));
   15190 
   15191 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15192 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15193 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15194 
   15195 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15196 }
   15197 
   15198 static int
   15199 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15200 {
   15201 	uint32_t ext_ctrl;
   15202 	int timeout;
   15203 
   15204 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15205 		device_xname(sc->sc_dev), __func__));
   15206 	mutex_enter(sc->sc_ich_phymtx);
   15207 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15208 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15209 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15210 			break;
   15211 		delay(1000);
   15212 	}
   15213 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15214 		device_printf(sc->sc_dev,
   15215 		    "SW has already locked the resource\n");
   15216 		goto out;
   15217 	}
   15218 
   15219 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15220 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15221 	for (timeout = 0; timeout < 1000; timeout++) {
   15222 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15223 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15224 			break;
   15225 		delay(1000);
   15226 	}
   15227 	if (timeout >= 1000) {
   15228 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15229 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15230 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15231 		goto out;
   15232 	}
   15233 	return 0;
   15234 
   15235 out:
   15236 	mutex_exit(sc->sc_ich_phymtx);
   15237 	return -1;
   15238 }
   15239 
   15240 static void
   15241 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15242 {
   15243 	uint32_t ext_ctrl;
   15244 
   15245 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15246 		device_xname(sc->sc_dev), __func__));
   15247 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15248 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15249 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15250 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15251 	} else
   15252 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15253 
   15254 	mutex_exit(sc->sc_ich_phymtx);
   15255 }
   15256 
   15257 static int
   15258 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15259 {
   15260 
   15261 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15262 		device_xname(sc->sc_dev), __func__));
   15263 	mutex_enter(sc->sc_ich_nvmmtx);
   15264 
   15265 	return 0;
   15266 }
   15267 
   15268 static void
   15269 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15270 {
   15271 
   15272 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15273 		device_xname(sc->sc_dev), __func__));
   15274 	mutex_exit(sc->sc_ich_nvmmtx);
   15275 }
   15276 
   15277 static int
   15278 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15279 {
   15280 	int i = 0;
   15281 	uint32_t reg;
   15282 
   15283 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15284 		device_xname(sc->sc_dev), __func__));
   15285 
   15286 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15287 	do {
   15288 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15289 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15290 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15291 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15292 			break;
   15293 		delay(2*1000);
   15294 		i++;
   15295 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15296 
   15297 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15298 		wm_put_hw_semaphore_82573(sc);
   15299 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15300 		    device_xname(sc->sc_dev));
   15301 		return -1;
   15302 	}
   15303 
   15304 	return 0;
   15305 }
   15306 
   15307 static void
   15308 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15309 {
   15310 	uint32_t reg;
   15311 
   15312 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15313 		device_xname(sc->sc_dev), __func__));
   15314 
   15315 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15316 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15317 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15318 }
   15319 
   15320 /*
   15321  * Management mode and power management related subroutines.
   15322  * BMC, AMT, suspend/resume and EEE.
   15323  */
   15324 
   15325 #ifdef WM_WOL
   15326 static int
   15327 wm_check_mng_mode(struct wm_softc *sc)
   15328 {
   15329 	int rv;
   15330 
   15331 	switch (sc->sc_type) {
   15332 	case WM_T_ICH8:
   15333 	case WM_T_ICH9:
   15334 	case WM_T_ICH10:
   15335 	case WM_T_PCH:
   15336 	case WM_T_PCH2:
   15337 	case WM_T_PCH_LPT:
   15338 	case WM_T_PCH_SPT:
   15339 	case WM_T_PCH_CNP:
   15340 		rv = wm_check_mng_mode_ich8lan(sc);
   15341 		break;
   15342 	case WM_T_82574:
   15343 	case WM_T_82583:
   15344 		rv = wm_check_mng_mode_82574(sc);
   15345 		break;
   15346 	case WM_T_82571:
   15347 	case WM_T_82572:
   15348 	case WM_T_82573:
   15349 	case WM_T_80003:
   15350 		rv = wm_check_mng_mode_generic(sc);
   15351 		break;
   15352 	default:
    15353 		/* Nothing to do */
   15354 		rv = 0;
   15355 		break;
   15356 	}
   15357 
   15358 	return rv;
   15359 }
   15360 
   15361 static int
   15362 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15363 {
   15364 	uint32_t fwsm;
   15365 
   15366 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15367 
   15368 	if (((fwsm & FWSM_FW_VALID) != 0)
   15369 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15370 		return 1;
   15371 
   15372 	return 0;
   15373 }
   15374 
   15375 static int
   15376 wm_check_mng_mode_82574(struct wm_softc *sc)
   15377 {
   15378 	uint16_t data;
   15379 
   15380 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15381 
   15382 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15383 		return 1;
   15384 
   15385 	return 0;
   15386 }
   15387 
   15388 static int
   15389 wm_check_mng_mode_generic(struct wm_softc *sc)
   15390 {
   15391 	uint32_t fwsm;
   15392 
   15393 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15394 
   15395 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15396 		return 1;
   15397 
   15398 	return 0;
   15399 }
   15400 #endif /* WM_WOL */
   15401 
   15402 static int
   15403 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15404 {
   15405 	uint32_t manc, fwsm, factps;
   15406 
   15407 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15408 		return 0;
   15409 
   15410 	manc = CSR_READ(sc, WMREG_MANC);
   15411 
   15412 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15413 		device_xname(sc->sc_dev), manc));
   15414 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15415 		return 0;
   15416 
   15417 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15418 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15419 		factps = CSR_READ(sc, WMREG_FACTPS);
   15420 		if (((factps & FACTPS_MNGCG) == 0)
   15421 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15422 			return 1;
   15423 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15424 		uint16_t data;
   15425 
   15426 		factps = CSR_READ(sc, WMREG_FACTPS);
   15427 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15428 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15429 			device_xname(sc->sc_dev), factps, data));
   15430 		if (((factps & FACTPS_MNGCG) == 0)
   15431 		    && ((data & NVM_CFG2_MNGM_MASK)
   15432 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15433 			return 1;
   15434 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15435 	    && ((manc & MANC_ASF_EN) == 0))
   15436 		return 1;
   15437 
   15438 	return 0;
   15439 }
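          /*
           * Illustrative summary: pass-through is only reported when ASF
           * firmware is present, MANC shows TCO packets being intercepted
           * (MANC_RECV_TCO_EN), and either the firmware mode (FWSM or NVM
           * CFG2) or the SMBus/ASF bits indicate the BMC expects
           * host-directed traffic.
           */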
   15440 
   15441 static bool
   15442 wm_phy_resetisblocked(struct wm_softc *sc)
   15443 {
   15444 	bool blocked = false;
   15445 	uint32_t reg;
   15446 	int i = 0;
   15447 
   15448 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15449 		device_xname(sc->sc_dev), __func__));
   15450 
   15451 	switch (sc->sc_type) {
   15452 	case WM_T_ICH8:
   15453 	case WM_T_ICH9:
   15454 	case WM_T_ICH10:
   15455 	case WM_T_PCH:
   15456 	case WM_T_PCH2:
   15457 	case WM_T_PCH_LPT:
   15458 	case WM_T_PCH_SPT:
   15459 	case WM_T_PCH_CNP:
   15460 		do {
   15461 			reg = CSR_READ(sc, WMREG_FWSM);
   15462 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15463 				blocked = true;
   15464 				delay(10*1000);
   15465 				continue;
   15466 			}
   15467 			blocked = false;
   15468 		} while (blocked && (i++ < 30));
   15469 		return blocked;
   15470 		break;
   15471 	case WM_T_82571:
   15472 	case WM_T_82572:
   15473 	case WM_T_82573:
   15474 	case WM_T_82574:
   15475 	case WM_T_82583:
   15476 	case WM_T_80003:
   15477 		reg = CSR_READ(sc, WMREG_MANC);
   15478 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15479 			return true;
   15480 		else
   15481 			return false;
   15482 		break;
   15483 	default:
   15484 		/* No problem */
   15485 		break;
   15486 	}
   15487 
   15488 	return false;
   15489 }
   15490 
   15491 static void
   15492 wm_get_hw_control(struct wm_softc *sc)
   15493 {
   15494 	uint32_t reg;
   15495 
   15496 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15497 		device_xname(sc->sc_dev), __func__));
   15498 
   15499 	if (sc->sc_type == WM_T_82573) {
   15500 		reg = CSR_READ(sc, WMREG_SWSM);
   15501 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15502 	} else if (sc->sc_type >= WM_T_82571) {
   15503 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15504 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15505 	}
   15506 }
   15507 
   15508 static void
   15509 wm_release_hw_control(struct wm_softc *sc)
   15510 {
   15511 	uint32_t reg;
   15512 
   15513 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15514 		device_xname(sc->sc_dev), __func__));
   15515 
   15516 	if (sc->sc_type == WM_T_82573) {
   15517 		reg = CSR_READ(sc, WMREG_SWSM);
   15518 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15519 	} else if (sc->sc_type >= WM_T_82571) {
   15520 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15521 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15522 	}
   15523 }
   15524 
   15525 static void
   15526 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15527 {
   15528 	uint32_t reg;
   15529 
   15530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15531 		device_xname(sc->sc_dev), __func__));
   15532 
   15533 	if (sc->sc_type < WM_T_PCH2)
   15534 		return;
   15535 
   15536 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15537 
   15538 	if (gate)
   15539 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15540 	else
   15541 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15542 
   15543 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15544 }
   15545 
   15546 static int
   15547 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15548 {
   15549 	uint32_t fwsm, reg;
   15550 	int rv;
   15551 
   15552 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15553 		device_xname(sc->sc_dev), __func__));
   15554 
   15555 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15556 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15557 
   15558 	/* Disable ULP */
   15559 	wm_ulp_disable(sc);
   15560 
   15561 	/* Acquire PHY semaphore */
   15562 	rv = sc->phy.acquire(sc);
   15563 	if (rv != 0) {
   15564 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15565 		device_xname(sc->sc_dev), __func__));
   15566 		return rv;
   15567 	}
   15568 
   15569 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15570 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15571 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15572 	 */
   15573 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15574 	switch (sc->sc_type) {
   15575 	case WM_T_PCH_LPT:
   15576 	case WM_T_PCH_SPT:
   15577 	case WM_T_PCH_CNP:
   15578 		if (wm_phy_is_accessible_pchlan(sc))
   15579 			break;
   15580 
   15581 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15582 		 * forcing MAC to SMBus mode first.
   15583 		 */
   15584 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15585 		reg |= CTRL_EXT_FORCE_SMBUS;
   15586 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15587 #if 0
   15588 		/* XXX Isn't this required??? */
   15589 		CSR_WRITE_FLUSH(sc);
   15590 #endif
   15591 		/* Wait 50 milliseconds for MAC to finish any retries
   15592 		 * that it might be trying to perform from previous
   15593 		 * attempts to acknowledge any phy read requests.
   15594 		 */
   15595 		delay(50 * 1000);
   15596 		/* FALLTHROUGH */
   15597 	case WM_T_PCH2:
   15598 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15599 			break;
   15600 		/* FALLTHROUGH */
   15601 	case WM_T_PCH:
   15602 		if (sc->sc_type == WM_T_PCH)
   15603 			if ((fwsm & FWSM_FW_VALID) != 0)
   15604 				break;
   15605 
   15606 		if (wm_phy_resetisblocked(sc) == true) {
   15607 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15608 			break;
   15609 		}
   15610 
   15611 		/* Toggle LANPHYPC Value bit */
   15612 		wm_toggle_lanphypc_pch_lpt(sc);
   15613 
   15614 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15615 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15616 				break;
   15617 
   15618 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15619 			 * so ensure that the MAC is also out of SMBus mode
   15620 			 */
   15621 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15622 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15623 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15624 
   15625 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15626 				break;
   15627 			rv = -1;
   15628 		}
   15629 		break;
   15630 	default:
   15631 		break;
   15632 	}
   15633 
   15634 	/* Release semaphore */
   15635 	sc->phy.release(sc);
   15636 
   15637 	if (rv == 0) {
   15638 		/* Check to see if able to reset PHY.  Print error if not */
   15639 		if (wm_phy_resetisblocked(sc)) {
   15640 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15641 			goto out;
   15642 		}
   15643 
    15644 		/* Reset the PHY before any access to it.  Doing so ensures
   15645 		 * that the PHY is in a known good state before we read/write
   15646 		 * PHY registers.  The generic reset is sufficient here,
   15647 		 * because we haven't determined the PHY type yet.
   15648 		 */
   15649 		if (wm_reset_phy(sc) != 0)
   15650 			goto out;
   15651 
   15652 		/* On a successful reset, possibly need to wait for the PHY
   15653 		 * to quiesce to an accessible state before returning control
   15654 		 * to the calling function.  If the PHY does not quiesce, then
   15655 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15656 		 * the PHY is in.
   15657 		 */
   15658 		if (wm_phy_resetisblocked(sc))
   15659 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15660 	}
   15661 
   15662 out:
   15663 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15664 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15665 		delay(10*1000);
   15666 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15667 	}
   15668 
    15669 	return rv;
   15670 }
   15671 
   15672 static void
   15673 wm_init_manageability(struct wm_softc *sc)
   15674 {
   15675 
   15676 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15677 		device_xname(sc->sc_dev), __func__));
   15678 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15679 
   15680 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15681 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15682 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15683 
   15684 		/* Disable hardware interception of ARP */
   15685 		manc &= ~MANC_ARP_EN;
   15686 
   15687 		/* Enable receiving management packets to the host */
   15688 		if (sc->sc_type >= WM_T_82571) {
   15689 			manc |= MANC_EN_MNG2HOST;
   15690 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15691 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15692 		}
   15693 
   15694 		CSR_WRITE(sc, WMREG_MANC, manc);
   15695 	}
   15696 }
   15697 
   15698 static void
   15699 wm_release_manageability(struct wm_softc *sc)
   15700 {
   15701 
   15702 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15703 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15704 
   15705 		manc |= MANC_ARP_EN;
   15706 		if (sc->sc_type >= WM_T_82571)
   15707 			manc &= ~MANC_EN_MNG2HOST;
   15708 
   15709 		CSR_WRITE(sc, WMREG_MANC, manc);
   15710 	}
   15711 }
   15712 
   15713 static void
   15714 wm_get_wakeup(struct wm_softc *sc)
   15715 {
   15716 
   15717 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15718 	switch (sc->sc_type) {
   15719 	case WM_T_82573:
   15720 	case WM_T_82583:
   15721 		sc->sc_flags |= WM_F_HAS_AMT;
   15722 		/* FALLTHROUGH */
   15723 	case WM_T_80003:
   15724 	case WM_T_82575:
   15725 	case WM_T_82576:
   15726 	case WM_T_82580:
   15727 	case WM_T_I350:
   15728 	case WM_T_I354:
   15729 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15730 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15731 		/* FALLTHROUGH */
   15732 	case WM_T_82541:
   15733 	case WM_T_82541_2:
   15734 	case WM_T_82547:
   15735 	case WM_T_82547_2:
   15736 	case WM_T_82571:
   15737 	case WM_T_82572:
   15738 	case WM_T_82574:
   15739 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15740 		break;
   15741 	case WM_T_ICH8:
   15742 	case WM_T_ICH9:
   15743 	case WM_T_ICH10:
   15744 	case WM_T_PCH:
   15745 	case WM_T_PCH2:
   15746 	case WM_T_PCH_LPT:
   15747 	case WM_T_PCH_SPT:
   15748 	case WM_T_PCH_CNP:
   15749 		sc->sc_flags |= WM_F_HAS_AMT;
   15750 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15751 		break;
   15752 	default:
   15753 		break;
   15754 	}
   15755 
   15756 	/* 1: HAS_MANAGE */
   15757 	if (wm_enable_mng_pass_thru(sc) != 0)
   15758 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15759 
   15760 	/*
    15761 	 * Note that the WOL flags are set after the EEPROM-related
    15762 	 * reset handling has run.
   15763 	 */
   15764 }
   15765 
   15766 /*
   15767  * Unconfigure Ultra Low Power mode.
   15768  * Only for I217 and newer (see below).
   15769  */
   15770 static int
   15771 wm_ulp_disable(struct wm_softc *sc)
   15772 {
   15773 	uint32_t reg;
   15774 	uint16_t phyreg;
   15775 	int i = 0, rv;
   15776 
   15777 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15778 		device_xname(sc->sc_dev), __func__));
   15779 	/* Exclude old devices */
   15780 	if ((sc->sc_type < WM_T_PCH_LPT)
   15781 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15782 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15783 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15784 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15785 		return 0;
   15786 
   15787 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    15788 		/* Request that the ME un-configure ULP mode in the PHY */
   15789 		reg = CSR_READ(sc, WMREG_H2ME);
   15790 		reg &= ~H2ME_ULP;
   15791 		reg |= H2ME_ENFORCE_SETTINGS;
   15792 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15793 
   15794 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15795 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15796 			if (i++ == 30) {
   15797 				device_printf(sc->sc_dev, "%s timed out\n",
   15798 				    __func__);
   15799 				return -1;
   15800 			}
   15801 			delay(10 * 1000);
   15802 		}
   15803 		reg = CSR_READ(sc, WMREG_H2ME);
   15804 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15805 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15806 
   15807 		return 0;
   15808 	}
   15809 
   15810 	/* Acquire semaphore */
   15811 	rv = sc->phy.acquire(sc);
   15812 	if (rv != 0) {
   15813 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15814 		device_xname(sc->sc_dev), __func__));
   15815 		return rv;
   15816 	}
   15817 
   15818 	/* Toggle LANPHYPC */
   15819 	wm_toggle_lanphypc_pch_lpt(sc);
   15820 
   15821 	/* Unforce SMBus mode in PHY */
   15822 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15823 	if (rv != 0) {
   15824 		uint32_t reg2;
   15825 
   15826 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15827 			__func__);
   15828 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15829 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15830 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15831 		delay(50 * 1000);
   15832 
   15833 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15834 		    &phyreg);
   15835 		if (rv != 0)
   15836 			goto release;
   15837 	}
   15838 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15839 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15840 
   15841 	/* Unforce SMBus mode in MAC */
   15842 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15843 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15844 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15845 
   15846 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15847 	if (rv != 0)
   15848 		goto release;
   15849 	phyreg |= HV_PM_CTRL_K1_ENA;
   15850 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15851 
   15852 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15853 		&phyreg);
   15854 	if (rv != 0)
   15855 		goto release;
   15856 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15857 	    | I218_ULP_CONFIG1_STICKY_ULP
   15858 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15859 	    | I218_ULP_CONFIG1_WOL_HOST
   15860 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15861 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15862 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15863 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15864 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15865 	phyreg |= I218_ULP_CONFIG1_START;
   15866 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15867 
   15868 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15869 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15870 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15871 
   15872 release:
   15873 	/* Release semaphore */
   15874 	sc->phy.release(sc);
   15875 	wm_gmii_reset(sc);
   15876 	delay(50 * 1000);
   15877 
   15878 	return rv;
   15879 }
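          /*
           * Illustrative summary of the two exit paths above: when ME
           * firmware is valid, ULP exit is requested via H2ME and polled
           * through FWSM_ULP_CFG_DONE; otherwise the driver exits ULP by
           * hand, toggling LANPHYPC, unforcing SMBus mode in both PHY and
           * MAC, and restarting the ULP configuration via I218_ULP_CONFIG1.
           */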
   15880 
   15881 /* WOL in the newer chipset interfaces (pchlan) */
   15882 static int
   15883 wm_enable_phy_wakeup(struct wm_softc *sc)
   15884 {
   15885 	device_t dev = sc->sc_dev;
   15886 	uint32_t mreg, moff;
   15887 	uint16_t wuce, wuc, wufc, preg;
   15888 	int i, rv;
   15889 
   15890 	KASSERT(sc->sc_type >= WM_T_PCH);
   15891 
   15892 	/* Copy MAC RARs to PHY RARs */
   15893 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15894 
   15895 	/* Activate PHY wakeup */
   15896 	rv = sc->phy.acquire(sc);
   15897 	if (rv != 0) {
   15898 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15899 		    __func__);
   15900 		return rv;
   15901 	}
   15902 
   15903 	/*
   15904 	 * Enable access to PHY wakeup registers.
   15905 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15906 	 */
   15907 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15908 	if (rv != 0) {
   15909 		device_printf(dev,
   15910 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15911 		goto release;
   15912 	}
   15913 
   15914 	/* Copy MAC MTA to PHY MTA */
   15915 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15916 		uint16_t lo, hi;
   15917 
   15918 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15919 		lo = (uint16_t)(mreg & 0xffff);
   15920 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15921 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15922 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15923 	}
   15924 
   15925 	/* Configure PHY Rx Control register */
   15926 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15927 	mreg = CSR_READ(sc, WMREG_RCTL);
   15928 	if (mreg & RCTL_UPE)
   15929 		preg |= BM_RCTL_UPE;
   15930 	if (mreg & RCTL_MPE)
   15931 		preg |= BM_RCTL_MPE;
   15932 	preg &= ~(BM_RCTL_MO_MASK);
   15933 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15934 	if (moff != 0)
   15935 		preg |= moff << BM_RCTL_MO_SHIFT;
   15936 	if (mreg & RCTL_BAM)
   15937 		preg |= BM_RCTL_BAM;
   15938 	if (mreg & RCTL_PMCF)
   15939 		preg |= BM_RCTL_PMCF;
   15940 	mreg = CSR_READ(sc, WMREG_CTRL);
   15941 	if (mreg & CTRL_RFCE)
   15942 		preg |= BM_RCTL_RFCE;
   15943 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15944 
   15945 	wuc = WUC_APME | WUC_PME_EN;
   15946 	wufc = WUFC_MAG;
   15947 	/* Enable PHY wakeup in MAC register */
   15948 	CSR_WRITE(sc, WMREG_WUC,
   15949 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15950 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15951 
   15952 	/* Configure and enable PHY wakeup in PHY registers */
   15953 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15954 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15955 
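	/*
	 * Activate PHY wakeup: restore the saved WUCE value, with the
	 * wakeup enable bits set, while turning wakeup register access
	 * back off.
	 */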
   15956 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15957 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15958 
   15959 release:
   15960 	sc->phy.release(sc);
   15961 
    15962 	return rv;
   15963 }
   15964 
   15965 /* Power down workaround on D3 */
   15966 static void
   15967 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15968 {
   15969 	uint32_t reg;
   15970 	uint16_t phyreg;
   15971 	int i;
   15972 
   15973 	for (i = 0; i < 2; i++) {
   15974 		/* Disable link */
   15975 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15976 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15977 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15978 
   15979 		/*
   15980 		 * Call gig speed drop workaround on Gig disable before
   15981 		 * accessing any PHY registers
   15982 		 */
   15983 		if (sc->sc_type == WM_T_ICH8)
   15984 			wm_gig_downshift_workaround_ich8lan(sc);
   15985 
   15986 		/* Write VR power-down enable */
   15987 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15988 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15989 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15990 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15991 
   15992 		/* Read it back and test */
   15993 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15994 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15995 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15996 			break;
   15997 
   15998 		/* Issue PHY reset and repeat at most one more time */
   15999 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16000 	}
   16001 }
   16002 
   16003 /*
   16004  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16005  *  @sc: pointer to the HW structure
   16006  *
   16007  *  During S0 to Sx transition, it is possible the link remains at gig
   16008  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16009  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16010  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16011  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16012  *  needs to be written.
    16013  *  Parts that support (and are linked to a partner which supports) EEE in
   16014  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16015  *  than 10Mbps w/o EEE.
   16016  */
   16017 static void
   16018 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16019 {
   16020 	device_t dev = sc->sc_dev;
   16021 	struct ethercom *ec = &sc->sc_ethercom;
   16022 	uint32_t phy_ctrl;
   16023 	int rv;
   16024 
   16025 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16026 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16027 
   16028 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16029 
   16030 	if (sc->sc_phytype == WMPHY_I217) {
   16031 		uint16_t devid = sc->sc_pcidevid;
   16032 
   16033 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16034 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16035 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16036 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16037 		    (sc->sc_type >= WM_T_PCH_SPT))
   16038 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16039 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16040 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16041 
   16042 		if (sc->phy.acquire(sc) != 0)
   16043 			goto out;
   16044 
   16045 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16046 			uint16_t eee_advert;
   16047 
   16048 			rv = wm_read_emi_reg_locked(dev,
   16049 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16050 			if (rv)
   16051 				goto release;
   16052 
   16053 			/*
   16054 			 * Disable LPLU if both link partners support 100BaseT
   16055 			 * EEE and 100Full is advertised on both ends of the
   16056 			 * link, and enable Auto Enable LPI since there will
   16057 			 * be no driver to enable LPI while in Sx.
   16058 			 */
   16059 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16060 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16061 				uint16_t anar, phy_reg;
   16062 
   16063 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16064 				    &anar);
   16065 				if (anar & ANAR_TX_FD) {
   16066 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16067 					    PHY_CTRL_NOND0A_LPLU);
   16068 
   16069 					/* Set Auto Enable LPI after link up */
   16070 					sc->phy.readreg_locked(dev, 2,
   16071 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16072 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16073 					sc->phy.writereg_locked(dev, 2,
   16074 					    I217_LPI_GPIO_CTRL, phy_reg);
   16075 				}
   16076 			}
   16077 		}
   16078 
   16079 		/*
   16080 		 * For i217 Intel Rapid Start Technology support,
   16081 		 * when the system is going into Sx and no manageability engine
   16082 		 * is present, the driver must configure proxy to reset only on
   16083 		 * power good.	LPI (Low Power Idle) state must also reset only
   16084 		 * on power good, as well as the MTA (Multicast table array).
   16085 		 * The SMBus release must also be disabled on LCD reset.
   16086 		 */
   16087 
   16088 		/*
   16089 		 * Enable MTA to reset for Intel Rapid Start Technology
   16090 		 * Support
   16091 		 */
   16092 
   16093 release:
   16094 		sc->phy.release(sc);
   16095 	}
   16096 out:
   16097 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16098 
   16099 	if (sc->sc_type == WM_T_ICH8)
   16100 		wm_gig_downshift_workaround_ich8lan(sc);
   16101 
   16102 	if (sc->sc_type >= WM_T_PCH) {
   16103 		wm_oem_bits_config_ich8lan(sc, false);
   16104 
   16105 		/* Reset PHY to activate OEM bits on 82577/8 */
   16106 		if (sc->sc_type == WM_T_PCH)
   16107 			wm_reset_phy(sc);
   16108 
   16109 		if (sc->phy.acquire(sc) != 0)
   16110 			return;
   16111 		wm_write_smbus_addr(sc);
   16112 		sc->phy.release(sc);
   16113 	}
   16114 }
   16115 
   16116 /*
   16117  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16118  *  @sc: pointer to the HW structure
   16119  *
   16120  *  During Sx to S0 transitions on non-managed devices or managed devices
   16121  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16122  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16123  *  the PHY.
   16124  *  On i217, setup Intel Rapid Start Technology.
   16125  */
   16126 static int
   16127 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16128 {
   16129 	device_t dev = sc->sc_dev;
   16130 	int rv;
   16131 
   16132 	if (sc->sc_type < WM_T_PCH2)
   16133 		return 0;
   16134 
   16135 	rv = wm_init_phy_workarounds_pchlan(sc);
   16136 	if (rv != 0)
   16137 		return rv;
   16138 
    16139 	/* For i217 Intel Rapid Start Technology support, when the system
    16140 	 * is transitioning from Sx and no manageability engine is present,
    16141 	 * configure SMBus to restore on reset, disable proxy, and enable
   16142 	 * the reset on MTA (Multicast table array).
   16143 	 */
   16144 	if (sc->sc_phytype == WMPHY_I217) {
   16145 		uint16_t phy_reg;
   16146 
   16147 		rv = sc->phy.acquire(sc);
   16148 		if (rv != 0)
   16149 			return rv;
   16150 
   16151 		/* Clear Auto Enable LPI after link up */
   16152 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16153 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16154 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16155 
   16156 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16157 			/* Restore clear on SMB if no manageability engine
   16158 			 * is present
   16159 			 */
   16160 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16161 			    &phy_reg);
   16162 			if (rv != 0)
   16163 				goto release;
   16164 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16165 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16166 
   16167 			/* Disable Proxy */
   16168 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16169 		}
   16170 		/* Enable reset on MTA */
    16171 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16172 		if (rv != 0)
   16173 			goto release;
   16174 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16175 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16176 
   16177 release:
   16178 		sc->phy.release(sc);
   16179 		return rv;
   16180 	}
   16181 
   16182 	return 0;
   16183 }
   16184 
   16185 static void
   16186 wm_enable_wakeup(struct wm_softc *sc)
   16187 {
   16188 	uint32_t reg, pmreg;
   16189 	pcireg_t pmode;
   16190 	int rv = 0;
   16191 
   16192 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16193 		device_xname(sc->sc_dev), __func__));
   16194 
   16195 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16196 	    &pmreg, NULL) == 0)
   16197 		return;
   16198 
   16199 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16200 		goto pme;
   16201 
   16202 	/* Advertise the wakeup capability */
   16203 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16204 	    | CTRL_SWDPIN(3));
   16205 
   16206 	/* Keep the laser running on fiber adapters */
   16207 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16208 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16209 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16210 		reg |= CTRL_EXT_SWDPIN(3);
   16211 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16212 	}
   16213 
   16214 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16215 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16216 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16217 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16218 		wm_suspend_workarounds_ich8lan(sc);
   16219 
   16220 #if 0	/* For the multicast packet */
   16221 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16222 	reg |= WUFC_MC;
   16223 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16224 #endif
   16225 
   16226 	if (sc->sc_type >= WM_T_PCH) {
   16227 		rv = wm_enable_phy_wakeup(sc);
   16228 		if (rv != 0)
   16229 			goto pme;
   16230 	} else {
   16231 		/* Enable wakeup by the MAC */
   16232 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16233 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16234 	}
   16235 
   16236 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16237 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16238 		|| (sc->sc_type == WM_T_PCH2))
   16239 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16240 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16241 
   16242 pme:
   16243 	/* Request PME */
   16244 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16245 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16246 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16247 		/* For WOL */
   16248 		pmode |= PCI_PMCSR_PME_EN;
   16249 	} else {
   16250 		/* Disable WOL */
   16251 		pmode &= ~PCI_PMCSR_PME_EN;
   16252 	}
   16253 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16254 }
   16255 
   16256 /* Disable ASPM L0s and/or L1 for workaround */
   16257 static void
   16258 wm_disable_aspm(struct wm_softc *sc)
   16259 {
   16260 	pcireg_t reg, mask = 0;
    16261 	const char *str = "";
   16262 
   16263 	/*
    16264 	 * Only for PCIe devices which have the PCIe capability in the PCI
    16265 	 * config space.
   16266 	 */
   16267 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16268 		return;
   16269 
   16270 	switch (sc->sc_type) {
   16271 	case WM_T_82571:
   16272 	case WM_T_82572:
   16273 		/*
   16274 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16275 		 * State Power management L1 State (ASPM L1).
   16276 		 */
   16277 		mask = PCIE_LCSR_ASPM_L1;
   16278 		str = "L1 is";
   16279 		break;
   16280 	case WM_T_82573:
   16281 	case WM_T_82574:
   16282 	case WM_T_82583:
   16283 		/*
   16284 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16285 		 *
    16286 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16287 		 * some chipsets.  The 82574 and 82583 documents say that
    16288 		 * disabling L0s with those specific chipsets is sufficient,
    16289 		 * but we follow what the Intel em driver does.
   16290 		 *
   16291 		 * References:
   16292 		 * Errata 8 of the Specification Update of i82573.
   16293 		 * Errata 20 of the Specification Update of i82574.
   16294 		 * Errata 9 of the Specification Update of i82583.
   16295 		 */
   16296 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16297 		str = "L0s and L1 are";
   16298 		break;
   16299 	default:
   16300 		return;
   16301 	}
   16302 
   16303 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16304 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16305 	reg &= ~mask;
   16306 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16307 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16308 
   16309 	/* Print only in wm_attach() */
   16310 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16311 		aprint_verbose_dev(sc->sc_dev,
   16312 		    "ASPM %s disabled to workaround the errata.\n", str);
   16313 }
   16314 
   16315 /* LPLU */
   16316 
   16317 static void
   16318 wm_lplu_d0_disable(struct wm_softc *sc)
   16319 {
   16320 	struct mii_data *mii = &sc->sc_mii;
   16321 	uint32_t reg;
   16322 	uint16_t phyval;
   16323 
   16324 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16325 		device_xname(sc->sc_dev), __func__));
   16326 
   16327 	if (sc->sc_phytype == WMPHY_IFE)
   16328 		return;
   16329 
   16330 	switch (sc->sc_type) {
   16331 	case WM_T_82571:
   16332 	case WM_T_82572:
   16333 	case WM_T_82573:
   16334 	case WM_T_82575:
   16335 	case WM_T_82576:
   16336 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16337 		phyval &= ~PMR_D0_LPLU;
   16338 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16339 		break;
   16340 	case WM_T_82580:
   16341 	case WM_T_I350:
   16342 	case WM_T_I210:
   16343 	case WM_T_I211:
   16344 		reg = CSR_READ(sc, WMREG_PHPM);
   16345 		reg &= ~PHPM_D0A_LPLU;
   16346 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16347 		break;
   16348 	case WM_T_82574:
   16349 	case WM_T_82583:
   16350 	case WM_T_ICH8:
   16351 	case WM_T_ICH9:
   16352 	case WM_T_ICH10:
   16353 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16354 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16355 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16356 		CSR_WRITE_FLUSH(sc);
   16357 		break;
   16358 	case WM_T_PCH:
   16359 	case WM_T_PCH2:
   16360 	case WM_T_PCH_LPT:
   16361 	case WM_T_PCH_SPT:
   16362 	case WM_T_PCH_CNP:
   16363 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16364 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16365 		if (wm_phy_resetisblocked(sc) == false)
   16366 			phyval |= HV_OEM_BITS_ANEGNOW;
   16367 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16368 		break;
   16369 	default:
   16370 		break;
   16371 	}
   16372 }
   16373 
   16374 /* EEE */
   16375 
   16376 static int
   16377 wm_set_eee_i350(struct wm_softc *sc)
   16378 {
   16379 	struct ethercom *ec = &sc->sc_ethercom;
   16380 	uint32_t ipcnfg, eeer;
   16381 	uint32_t ipcnfg_mask
   16382 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16383 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16384 
   16385 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16386 
   16387 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16388 	eeer = CSR_READ(sc, WMREG_EEER);
   16389 
   16390 	/* Enable or disable per user setting */
   16391 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16392 		ipcnfg |= ipcnfg_mask;
   16393 		eeer |= eeer_mask;
   16394 	} else {
   16395 		ipcnfg &= ~ipcnfg_mask;
   16396 		eeer &= ~eeer_mask;
   16397 	}
   16398 
   16399 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16400 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16401 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16402 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16403 
   16404 	return 0;
   16405 }
   16406 
   16407 static int
   16408 wm_set_eee_pchlan(struct wm_softc *sc)
   16409 {
   16410 	device_t dev = sc->sc_dev;
   16411 	struct ethercom *ec = &sc->sc_ethercom;
   16412 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16413 	int rv;
   16414 
   16415 	switch (sc->sc_phytype) {
   16416 	case WMPHY_82579:
   16417 		lpa = I82579_EEE_LP_ABILITY;
   16418 		pcs_status = I82579_EEE_PCS_STATUS;
   16419 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16420 		break;
   16421 	case WMPHY_I217:
   16422 		lpa = I217_EEE_LP_ABILITY;
   16423 		pcs_status = I217_EEE_PCS_STATUS;
   16424 		adv_addr = I217_EEE_ADVERTISEMENT;
   16425 		break;
   16426 	default:
   16427 		return 0;
   16428 	}
   16429 
   16430 	rv = sc->phy.acquire(sc);
   16431 	if (rv != 0) {
   16432 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16433 		return rv;
   16434 	}
   16435 
   16436 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16437 	if (rv != 0)
   16438 		goto release;
   16439 
   16440 	/* Clear bits that enable EEE in various speeds */
   16441 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16442 
   16443 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16444 		/* Save off link partner's EEE ability */
   16445 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16446 		if (rv != 0)
   16447 			goto release;
   16448 
   16449 		/* Read EEE advertisement */
   16450 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16451 			goto release;
   16452 
   16453 		/*
   16454 		 * Enable EEE only for speeds in which the link partner is
   16455 		 * EEE capable and for which we advertise EEE.
   16456 		 */
   16457 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16458 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16459 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16460 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16461 			if ((data & ANLPAR_TX_FD) != 0)
   16462 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16463 			else {
   16464 				/*
   16465 				 * EEE is not supported in 100Half, so ignore
   16466 				 * partner's EEE in 100 ability if full-duplex
   16467 				 * is not advertised.
   16468 				 */
   16469 				sc->eee_lp_ability
   16470 				    &= ~AN_EEEADVERT_100_TX;
   16471 			}
   16472 		}
   16473 	}
   16474 
   16475 	if (sc->sc_phytype == WMPHY_82579) {
   16476 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16477 		if (rv != 0)
   16478 			goto release;
   16479 
   16480 		data &= ~I82579_LPI_PLL_SHUT_100;
   16481 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16482 	}
   16483 
   16484 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16485 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16486 		goto release;
   16487 
   16488 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16489 release:
   16490 	sc->phy.release(sc);
   16491 
   16492 	return rv;
   16493 }
   16494 
   16495 static int
   16496 wm_set_eee(struct wm_softc *sc)
   16497 {
   16498 	struct ethercom *ec = &sc->sc_ethercom;
   16499 
   16500 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16501 		return 0;
   16502 
   16503 	if (sc->sc_type == WM_T_I354) {
   16504 		/* I354 uses an external PHY */
   16505 		return 0; /* not yet */
   16506 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16507 		return wm_set_eee_i350(sc);
   16508 	else if (sc->sc_type >= WM_T_PCH2)
   16509 		return wm_set_eee_pchlan(sc);
   16510 
   16511 	return 0;
   16512 }
   16513 
   16514 /*
   16515  * Workarounds (mainly PHY related).
    16516  * Basically, PHY workarounds are in the PHY drivers.
   16517  */
   16518 
   16519 /* Workaround for 82566 Kumeran PCS lock loss */
   16520 static int
   16521 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16522 {
   16523 	struct mii_data *mii = &sc->sc_mii;
   16524 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16525 	int i, reg, rv;
   16526 	uint16_t phyreg;
   16527 
   16528 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16529 		device_xname(sc->sc_dev), __func__));
   16530 
   16531 	/* If the link is not up, do nothing */
   16532 	if ((status & STATUS_LU) == 0)
   16533 		return 0;
   16534 
    16535 	/* Nothing to do if the link speed is not 1Gbps */
   16536 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16537 		return 0;
   16538 
   16539 	for (i = 0; i < 10; i++) {
   16540 		/* read twice */
   16541 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16542 		if (rv != 0)
   16543 			return rv;
   16544 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16545 		if (rv != 0)
   16546 			return rv;
   16547 
   16548 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16549 			goto out;	/* GOOD! */
   16550 
   16551 		/* Reset the PHY */
   16552 		wm_reset_phy(sc);
   16553 		delay(5*1000);
   16554 	}
   16555 
   16556 	/* Disable GigE link negotiation */
   16557 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16558 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16559 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16560 
   16561 	/*
   16562 	 * Call gig speed drop workaround on Gig disable before accessing
   16563 	 * any PHY registers.
   16564 	 */
   16565 	wm_gig_downshift_workaround_ich8lan(sc);
   16566 
   16567 out:
   16568 	return 0;
   16569 }
   16570 
   16571 /*
   16572  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16573  *  @sc: pointer to the HW structure
   16574  *
    16575  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16576  *  LPLU, Gig disable, MDIC PHY reset):
   16577  *    1) Set Kumeran Near-end loopback
   16578  *    2) Clear Kumeran Near-end loopback
   16579  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16580  */
   16581 static void
   16582 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16583 {
   16584 	uint16_t kmreg;
   16585 
   16586 	/* Only for igp3 */
   16587 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16588 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16589 			return;
   16590 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16591 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16592 			return;
   16593 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16594 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16595 	}
   16596 }
   16597 
   16598 /*
   16599  * Workaround for pch's PHYs
    16600  * XXX should be moved to a new PHY driver?
   16601  */
   16602 static int
   16603 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16604 {
   16605 	device_t dev = sc->sc_dev;
   16606 	struct mii_data *mii = &sc->sc_mii;
   16607 	struct mii_softc *child;
   16608 	uint16_t phy_data, phyrev = 0;
   16609 	int phytype = sc->sc_phytype;
   16610 	int rv;
   16611 
   16612 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16613 		device_xname(dev), __func__));
   16614 	KASSERT(sc->sc_type == WM_T_PCH);
   16615 
   16616 	/* Set MDIO slow mode before any other MDIO access */
   16617 	if (phytype == WMPHY_82577)
   16618 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16619 			return rv;
   16620 
   16621 	child = LIST_FIRST(&mii->mii_phys);
   16622 	if (child != NULL)
   16623 		phyrev = child->mii_mpd_rev;
   16624 
    16625 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   16626 	if ((child != NULL) &&
   16627 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16628 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16629 		/* Disable generation of early preamble (0x4431) */
   16630 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16631 		    &phy_data);
   16632 		if (rv != 0)
   16633 			return rv;
   16634 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16635 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16636 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16637 		    phy_data);
   16638 		if (rv != 0)
   16639 			return rv;
   16640 
   16641 		/* Preamble tuning for SSC */
   16642 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16643 		if (rv != 0)
   16644 			return rv;
   16645 	}
   16646 
   16647 	/* 82578 */
   16648 	if (phytype == WMPHY_82578) {
   16649 		/*
   16650 		 * Return registers to default by doing a soft reset then
   16651 		 * writing 0x3140 to the control register
   16652 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16653 		 */
   16654 		if ((child != NULL) && (phyrev < 2)) {
   16655 			PHY_RESET(child);
   16656 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16657 			if (rv != 0)
   16658 				return rv;
   16659 		}
   16660 	}
   16661 
   16662 	/* Select page 0 */
   16663 	if ((rv = sc->phy.acquire(sc)) != 0)
   16664 		return rv;
   16665 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16666 	sc->phy.release(sc);
   16667 	if (rv != 0)
   16668 		return rv;
   16669 
   16670 	/*
   16671 	 * Configure the K1 Si workaround during phy reset assuming there is
   16672 	 * link so that it disables K1 if link is in 1Gbps.
   16673 	 */
   16674 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16675 		return rv;
   16676 
   16677 	/* Workaround for link disconnects on a busy hub in half duplex */
   16678 	rv = sc->phy.acquire(sc);
   16679 	if (rv)
   16680 		return rv;
   16681 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16682 	if (rv)
   16683 		goto release;
   16684 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16685 	    phy_data & 0x00ff);
   16686 	if (rv)
   16687 		goto release;
   16688 
   16689 	/* Set MSE higher to enable link to stay up when noise is high */
   16690 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16691 release:
   16692 	sc->phy.release(sc);
   16693 
   16694 	return rv;
   16695 }
   16696 
   16697 /*
   16698  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16699  *  @sc:   pointer to the HW structure
   16700  */
   16701 static void
   16702 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16703 {
   16704 
   16705 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16706 		device_xname(sc->sc_dev), __func__));
   16707 
   16708 	if (sc->phy.acquire(sc) != 0)
   16709 		return;
   16710 
   16711 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16712 
   16713 	sc->phy.release(sc);
   16714 }
   16715 
   16716 static void
   16717 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16718 {
   16719 	device_t dev = sc->sc_dev;
   16720 	uint32_t mac_reg;
   16721 	uint16_t i, wuce;
   16722 	int count;
   16723 
   16724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16725 		device_xname(dev), __func__));
   16726 
   16727 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16728 		return;
   16729 
   16730 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16731 	count = wm_rar_count(sc);
   16732 	for (i = 0; i < count; i++) {
   16733 		uint16_t lo, hi;
   16734 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16735 		lo = (uint16_t)(mac_reg & 0xffff);
   16736 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16737 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16738 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16739 
   16740 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16741 		lo = (uint16_t)(mac_reg & 0xffff);
   16742 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16743 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16744 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16745 	}
   16746 
   16747 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16748 }
   16749 
   16750 /*
   16751  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16752  *  with 82579 PHY
   16753  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16754  */
   16755 static int
   16756 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16757 {
   16758 	device_t dev = sc->sc_dev;
   16759 	int rar_count;
   16760 	int rv;
   16761 	uint32_t mac_reg;
   16762 	uint16_t dft_ctrl, data;
   16763 	uint16_t i;
   16764 
   16765 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16766 		device_xname(dev), __func__));
   16767 
   16768 	if (sc->sc_type < WM_T_PCH2)
   16769 		return 0;
   16770 
   16771 	/* Acquire PHY semaphore */
   16772 	rv = sc->phy.acquire(sc);
   16773 	if (rv != 0)
   16774 		return rv;
   16775 
   16776 	/* Disable Rx path while enabling/disabling workaround */
   16777 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16778 	if (rv != 0)
   16779 		goto out;
   16780 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16781 	    dft_ctrl | (1 << 14));
   16782 	if (rv != 0)
   16783 		goto out;
   16784 
   16785 	if (enable) {
   16786 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16787 		 * SHRAL/H) and initial CRC values to the MAC
   16788 		 */
   16789 		rar_count = wm_rar_count(sc);
   16790 		for (i = 0; i < rar_count; i++) {
   16791 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16792 			uint32_t addr_high, addr_low;
   16793 
   16794 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16795 			if (!(addr_high & RAL_AV))
   16796 				continue;
   16797 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16798 			mac_addr[0] = (addr_low & 0xFF);
   16799 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16800 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16801 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16802 			mac_addr[4] = (addr_high & 0xFF);
   16803 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16804 
   16805 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16806 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16807 		}
   16808 
   16809 		/* Write Rx addresses to the PHY */
   16810 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16811 	}
   16812 
   16813 	/*
   16814 	 * If enable ==
   16815 	 *	true: Enable jumbo frame workaround in the MAC.
   16816 	 *	false: Write MAC register values back to h/w defaults.
   16817 	 */
   16818 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16819 	if (enable) {
   16820 		mac_reg &= ~(1 << 14);
   16821 		mac_reg |= (7 << 15);
   16822 	} else
   16823 		mac_reg &= ~(0xf << 14);
   16824 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16825 
   16826 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16827 	if (enable) {
   16828 		mac_reg |= RCTL_SECRC;
   16829 		sc->sc_rctl |= RCTL_SECRC;
   16830 		sc->sc_flags |= WM_F_CRC_STRIP;
   16831 	} else {
   16832 		mac_reg &= ~RCTL_SECRC;
   16833 		sc->sc_rctl &= ~RCTL_SECRC;
   16834 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16835 	}
   16836 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16837 
   16838 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16839 	if (rv != 0)
   16840 		goto out;
   16841 	if (enable)
   16842 		data |= 1 << 0;
   16843 	else
   16844 		data &= ~(1 << 0);
   16845 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16846 	if (rv != 0)
   16847 		goto out;
   16848 
   16849 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16850 	if (rv != 0)
   16851 		goto out;
   16852 	/*
    16853 	 * XXX FreeBSD and Linux do the same thing here: they set the same
    16854 	 * value in both the enable case and the disable case. Is that correct?
   16855 	 */
   16856 	data &= ~(0xf << 8);
   16857 	data |= (0xb << 8);
   16858 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16859 	if (rv != 0)
   16860 		goto out;
   16861 
   16862 	/*
   16863 	 * If enable ==
   16864 	 *	true: Enable jumbo frame workaround in the PHY.
   16865 	 *	false: Write PHY register values back to h/w defaults.
   16866 	 */
   16867 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16868 	if (rv != 0)
   16869 		goto out;
   16870 	data &= ~(0x7F << 5);
   16871 	if (enable)
   16872 		data |= (0x37 << 5);
   16873 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16874 	if (rv != 0)
   16875 		goto out;
   16876 
   16877 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16878 	if (rv != 0)
   16879 		goto out;
   16880 	if (enable)
   16881 		data &= ~(1 << 13);
   16882 	else
   16883 		data |= (1 << 13);
   16884 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16885 	if (rv != 0)
   16886 		goto out;
   16887 
   16888 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16889 	if (rv != 0)
   16890 		goto out;
   16891 	data &= ~(0x3FF << 2);
   16892 	if (enable)
   16893 		data |= (I82579_TX_PTR_GAP << 2);
   16894 	else
   16895 		data |= (0x8 << 2);
   16896 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16897 	if (rv != 0)
   16898 		goto out;
   16899 
   16900 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16901 	    enable ? 0xf100 : 0x7e00);
   16902 	if (rv != 0)
   16903 		goto out;
   16904 
   16905 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16906 	if (rv != 0)
   16907 		goto out;
   16908 	if (enable)
   16909 		data |= 1 << 10;
   16910 	else
   16911 		data &= ~(1 << 10);
   16912 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16913 	if (rv != 0)
   16914 		goto out;
   16915 
   16916 	/* Re-enable Rx path after enabling/disabling workaround */
   16917 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16918 	    dft_ctrl & ~(1 << 14));
   16919 
   16920 out:
   16921 	sc->phy.release(sc);
   16922 
   16923 	return rv;
   16924 }
   16925 
   16926 /*
   16927  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16928  *  done after every PHY reset.
   16929  */
   16930 static int
   16931 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16932 {
   16933 	device_t dev = sc->sc_dev;
   16934 	int rv;
   16935 
   16936 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16937 		device_xname(dev), __func__));
   16938 	KASSERT(sc->sc_type == WM_T_PCH2);
   16939 
   16940 	/* Set MDIO slow mode before any other MDIO access */
   16941 	rv = wm_set_mdio_slow_mode_hv(sc);
   16942 	if (rv != 0)
   16943 		return rv;
   16944 
   16945 	rv = sc->phy.acquire(sc);
   16946 	if (rv != 0)
   16947 		return rv;
   16948 	/* Set MSE higher to enable link to stay up when noise is high */
   16949 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16950 	if (rv != 0)
   16951 		goto release;
   16952 	/* Drop link after 5 times MSE threshold was reached */
   16953 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16954 release:
   16955 	sc->phy.release(sc);
   16956 
   16957 	return rv;
   16958 }
   16959 
   16960 /**
   16961  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16962  *  @link: link up bool flag
   16963  *
   16964  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    16965  *  preventing further DMA write requests.  Work around the issue by disabling
    16966  *  the de-assertion of the clock request when in 1Gbps mode.
   16967  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16968  *  speeds in order to avoid Tx hangs.
   16969  **/
   16970 static int
   16971 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16972 {
   16973 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16974 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16975 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16976 	uint16_t phyreg;
   16977 
   16978 	if (link && (speed == STATUS_SPEED_1000)) {
   16979 		int rv;
   16980 
   16981 		rv = sc->phy.acquire(sc);
   16982 		if (rv != 0)
   16983 			return rv;
   16984 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16985 		    &phyreg);
   16986 		if (rv != 0)
   16987 			goto release;
   16988 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16989 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16990 		if (rv != 0)
   16991 			goto release;
   16992 		delay(20);
   16993 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16994 
   16995 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16996 		    &phyreg);
   16997 release:
   16998 		sc->phy.release(sc);
   16999 		return rv;
   17000 	}
   17001 
   17002 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17003 
   17004 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17005 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17006 	    || !link
   17007 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17008 		goto update_fextnvm6;
   17009 
   17010 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17011 
   17012 	/* Clear link status transmit timeout */
   17013 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17014 	if (speed == STATUS_SPEED_100) {
   17015 		/* Set inband Tx timeout to 5x10us for 100Half */
   17016 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17017 
   17018 		/* Do not extend the K1 entry latency for 100Half */
   17019 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17020 	} else {
   17021 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17022 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17023 
   17024 		/* Extend the K1 entry latency for 10 Mbps */
   17025 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17026 	}
   17027 
   17028 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17029 
   17030 update_fextnvm6:
   17031 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17032 	return 0;
   17033 }
   17034 
   17035 /*
   17036  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17037  *  @sc:   pointer to the HW structure
   17038  *  @link: link up bool flag
   17039  *
   17040  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    17041  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17042  *  If link is down, the function will restore the default K1 setting located
   17043  *  in the NVM.
   17044  */
   17045 static int
   17046 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17047 {
   17048 	int k1_enable = sc->sc_nvm_k1_enabled;
   17049 	int rv;
   17050 
   17051 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17052 		device_xname(sc->sc_dev), __func__));
   17053 
   17054 	rv = sc->phy.acquire(sc);
   17055 	if (rv != 0)
   17056 		return rv;
   17057 
   17058 	if (link) {
   17059 		k1_enable = 0;
   17060 
   17061 		/* Link stall fix for link up */
   17062 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17063 		    0x0100);
   17064 	} else {
   17065 		/* Link stall fix for link down */
   17066 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17067 		    0x4100);
   17068 	}
   17069 
   17070 	wm_configure_k1_ich8lan(sc, k1_enable);
   17071 	sc->phy.release(sc);
   17072 
   17073 	return 0;
   17074 }
   17075 
   17076 /*
   17077  *  wm_k1_workaround_lv - K1 Si workaround
   17078  *  @sc:   pointer to the HW structure
   17079  *
    17080  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    17081  *  Disable K1 for the 1000 and 100 speeds.
   17082  */
   17083 static int
   17084 wm_k1_workaround_lv(struct wm_softc *sc)
   17085 {
   17086 	uint32_t reg;
   17087 	uint16_t phyreg;
   17088 	int rv;
   17089 
   17090 	if (sc->sc_type != WM_T_PCH2)
   17091 		return 0;
   17092 
   17093 	/* Set K1 beacon duration based on 10Mbps speed */
   17094 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17095 	if (rv != 0)
   17096 		return rv;
   17097 
   17098 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17099 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17100 		if (phyreg &
   17101 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17102 			/* LV 1G/100 packet drop issue workaround */
   17103 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17104 			    &phyreg);
   17105 			if (rv != 0)
   17106 				return rv;
   17107 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17108 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17109 			    phyreg);
   17110 			if (rv != 0)
   17111 				return rv;
   17112 		} else {
   17113 			/* For 10Mbps */
   17114 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17115 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17116 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17117 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17118 		}
   17119 	}
   17120 
   17121 	return 0;
   17122 }
   17123 
   17124 /*
   17125  *  wm_link_stall_workaround_hv - Si workaround
   17126  *  @sc: pointer to the HW structure
   17127  *
   17128  *  This function works around a Si bug where the link partner can get
   17129  *  a link up indication before the PHY does. If small packets are sent
    17130  *  by the link partner, they can be placed in the packet buffer without
    17131  *  being properly accounted for by the PHY and will stall, preventing
   17132  *  further packets from being received.  The workaround is to clear the
   17133  *  packet buffer after the PHY detects link up.
   17134  */
   17135 static int
   17136 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17137 {
   17138 	uint16_t phyreg;
   17139 
   17140 	if (sc->sc_phytype != WMPHY_82578)
   17141 		return 0;
   17142 
    17143 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   17144 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17145 	if ((phyreg & BMCR_LOOP) != 0)
   17146 		return 0;
   17147 
   17148 	/* Check if link is up and at 1Gbps */
   17149 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17150 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17151 	    | BM_CS_STATUS_SPEED_MASK;
   17152 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17153 		| BM_CS_STATUS_SPEED_1000))
   17154 		return 0;
   17155 
   17156 	delay(200 * 1000);	/* XXX too big */
   17157 
   17158 	/* Flush the packets in the fifo buffer */
   17159 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17160 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17161 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17162 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17163 
   17164 	return 0;
   17165 }
   17166 
   17167 static int
   17168 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17169 {
   17170 	int rv;
   17171 
   17172 	rv = sc->phy.acquire(sc);
   17173 	if (rv != 0) {
   17174 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17175 		    __func__);
   17176 		return rv;
   17177 	}
   17178 
   17179 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17180 
   17181 	sc->phy.release(sc);
   17182 
   17183 	return rv;
   17184 }
   17185 
   17186 static int
   17187 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17188 {
   17189 	int rv;
   17190 	uint16_t reg;
   17191 
   17192 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17193 	if (rv != 0)
   17194 		return rv;
   17195 
   17196 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17197 	    reg | HV_KMRN_MDIO_SLOW);
   17198 }
   17199 
   17200 /*
   17201  *  wm_configure_k1_ich8lan - Configure K1 power state
   17202  *  @sc: pointer to the HW structure
   17203  *  @enable: K1 state to configure
   17204  *
   17205  *  Configure the K1 power state based on the provided parameter.
   17206  *  Assumes semaphore already acquired.
   17207  */
   17208 static void
   17209 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17210 {
   17211 	uint32_t ctrl, ctrl_ext, tmp;
   17212 	uint16_t kmreg;
   17213 	int rv;
   17214 
   17215 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17216 
   17217 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17218 	if (rv != 0)
   17219 		return;
   17220 
   17221 	if (k1_enable)
   17222 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17223 	else
   17224 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17225 
   17226 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17227 	if (rv != 0)
   17228 		return;
   17229 
   17230 	delay(20);
   17231 
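	/*
	 * Briefly force the MAC speed (with the speed-bypass bit set) and
	 * then restore CTRL/CTRL_EXT; this appears to be required for the
	 * new K1 setting to take effect.
	 */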
   17232 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17233 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17234 
   17235 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17236 	tmp |= CTRL_FRCSPD;
   17237 
   17238 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17239 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17240 	CSR_WRITE_FLUSH(sc);
   17241 	delay(20);
   17242 
   17243 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17244 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17245 	CSR_WRITE_FLUSH(sc);
   17246 	delay(20);
   17247 
   17248 	return;
   17249 }
   17250 
   17251 /* special case - for 82575 - need to do manual init ... */
   17252 static void
   17253 wm_reset_init_script_82575(struct wm_softc *sc)
   17254 {
   17255 	/*
    17256 	 * Remark: this is untested code - we have no board without EEPROM.
    17257 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17258 	 */
   17259 
   17260 	/* SerDes configuration via SERDESCTRL */
   17261 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17262 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17263 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17264 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17265 
   17266 	/* CCM configuration via CCMCTL register */
   17267 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17268 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17269 
   17270 	/* PCIe lanes configuration */
   17271 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17272 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17273 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17274 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17275 
   17276 	/* PCIe PLL Configuration */
   17277 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17278 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17279 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17280 }
   17281 
   17282 static void
   17283 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17284 {
   17285 	uint32_t reg;
   17286 	uint16_t nvmword;
   17287 	int rv;
   17288 
   17289 	if (sc->sc_type != WM_T_82580)
   17290 		return;
   17291 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17292 		return;
   17293 
   17294 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17295 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17296 	if (rv != 0) {
   17297 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17298 		    __func__);
   17299 		return;
   17300 	}
   17301 
   17302 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17303 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17304 		reg |= MDICNFG_DEST;
   17305 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17306 		reg |= MDICNFG_COM_MDIO;
   17307 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17308 }
   17309 
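/* A PHY ID of all zeros or all ones means the PHY did not respond */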
   17310 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17311 
   17312 static bool
   17313 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17314 {
   17315 	uint32_t reg;
   17316 	uint16_t id1, id2;
   17317 	int i, rv;
   17318 
   17319 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17320 		device_xname(sc->sc_dev), __func__));
   17321 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17322 
   17323 	id1 = id2 = 0xffff;
   17324 	for (i = 0; i < 2; i++) {
   17325 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17326 		    &id1);
   17327 		if ((rv != 0) || MII_INVALIDID(id1))
   17328 			continue;
   17329 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17330 		    &id2);
   17331 		if ((rv != 0) || MII_INVALIDID(id2))
   17332 			continue;
   17333 		break;
   17334 	}
   17335 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17336 		goto out;
   17337 
   17338 	/*
   17339 	 * In case the PHY needs to be in mdio slow mode,
   17340 	 * set slow mode and try to get the PHY id again.
   17341 	 */
   17342 	rv = 0;
   17343 	if (sc->sc_type < WM_T_PCH_LPT) {
   17344 		wm_set_mdio_slow_mode_hv_locked(sc);
   17345 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17346 		    &id1);
   17347 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17348 		    &id2);
   17349 	}
   17350 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17351 		device_printf(sc->sc_dev, "XXX return with false\n");
   17352 		return false;
   17353 	}
   17354 out:
   17355 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17356 		/* Only unforce SMBus if ME is not active */
   17357 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17358 			uint16_t phyreg;
   17359 
   17360 			/* Unforce SMBus mode in PHY */
   17361 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17362 			    CV_SMB_CTRL, &phyreg);
   17363 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17364 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17365 			    CV_SMB_CTRL, phyreg);
   17366 
   17367 			/* Unforce SMBus mode in MAC */
   17368 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17369 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17370 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17371 		}
   17372 	}
   17373 	return true;
   17374 }
   17375 
   17376 static void
   17377 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17378 {
   17379 	uint32_t reg;
   17380 	int i;
   17381 
   17382 	/* Set PHY Config Counter to 50msec */
   17383 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17384 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17385 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17386 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17387 
   17388 	/* Toggle LANPHYPC */
   17389 	reg = CSR_READ(sc, WMREG_CTRL);
   17390 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17391 	reg &= ~CTRL_LANPHYPC_VALUE;
   17392 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17393 	CSR_WRITE_FLUSH(sc);
   17394 	delay(1000);
   17395 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17396 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17397 	CSR_WRITE_FLUSH(sc);
   17398 
   17399 	if (sc->sc_type < WM_T_PCH_LPT)
   17400 		delay(50 * 1000);
   17401 	else {
   17402 		i = 20;
   17403 
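		/*
		 * Poll up to 20 * 5ms (roughly 100ms) for the LANPHYPC
		 * cycle done indication (CTRL_EXT_LPCD).
		 */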
   17404 		do {
   17405 			delay(5 * 1000);
   17406 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17407 		    && i--);
   17408 
   17409 		delay(30 * 1000);
   17410 	}
   17411 }
   17412 
   17413 static int
   17414 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17415 {
   17416 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17417 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17418 	uint32_t rxa;
   17419 	uint16_t scale = 0, lat_enc = 0;
   17420 	int32_t obff_hwm = 0;
   17421 	int64_t lat_ns, value;
   17422 
   17423 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17424 		device_xname(sc->sc_dev), __func__));
   17425 
   17426 	if (link) {
   17427 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17428 		uint32_t status;
   17429 		uint16_t speed;
   17430 		pcireg_t preg;
   17431 
   17432 		status = CSR_READ(sc, WMREG_STATUS);
   17433 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17434 		case STATUS_SPEED_10:
   17435 			speed = 10;
   17436 			break;
   17437 		case STATUS_SPEED_100:
   17438 			speed = 100;
   17439 			break;
   17440 		case STATUS_SPEED_1000:
   17441 			speed = 1000;
   17442 			break;
   17443 		default:
   17444 			device_printf(sc->sc_dev, "Unknown speed "
   17445 			    "(status = %08x)\n", status);
   17446 			return -1;
   17447 		}
   17448 
   17449 		/* Rx Packet Buffer Allocation size (KB) */
   17450 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17451 
   17452 		/*
   17453 		 * Determine the maximum latency tolerated by the device.
   17454 		 *
   17455 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17456 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17457 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17458 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17459 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17460 		 */
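		/*
		 * For example, scale=1 (units of 2^5 ns) with value=1000
		 * encodes 1000 * 32 = 32000 ns of tolerated latency.  The
		 * lat_ns computed below is the time (in ns) that the Rx
		 * buffer, less two maximum-sized frames, can absorb
		 * line-rate traffic.
		 */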
   17461 		lat_ns = ((int64_t)rxa * 1024 -
   17462 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17463 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17464 		if (lat_ns < 0)
   17465 			lat_ns = 0;
   17466 		else
   17467 			lat_ns /= speed;
   17468 		value = lat_ns;
   17469 
   17470 		while (value > LTRV_VALUE) {
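			/* Each scale step is a factor of 2^5; divide,
			 * rounding up */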
    17471 			scale++;
   17472 			value = howmany(value, __BIT(5));
   17473 		}
   17474 		if (scale > LTRV_SCALE_MAX) {
   17475 			device_printf(sc->sc_dev,
   17476 			    "Invalid LTR latency scale %d\n", scale);
   17477 			return -1;
   17478 		}
   17479 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17480 
   17481 		/* Determine the maximum latency tolerated by the platform */
   17482 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17483 		    WM_PCI_LTR_CAP_LPT);
   17484 		max_snoop = preg & 0xffff;
   17485 		max_nosnoop = preg >> 16;
   17486 
   17487 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17488 
   17489 		if (lat_enc > max_ltr_enc) {
   17490 			lat_enc = max_ltr_enc;
   17491 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17492 			    * PCI_LTR_SCALETONS(
   17493 				    __SHIFTOUT(lat_enc,
   17494 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17495 		}
   17496 
   17497 		if (lat_ns) {
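			/*
			 * Convert the tolerated latency into the amount of
			 * Rx buffer (KB) consumed in that time at the link
			 * speed; the OBFF high water mark is what remains
			 * of the allocation.
			 */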
   17498 			lat_ns *= speed * 1000;
   17499 			lat_ns /= 8;
   17500 			lat_ns /= 1000000000;
   17501 			obff_hwm = (int32_t)(rxa - lat_ns);
   17502 		}
   17503 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    17504 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17505 			    "(rxa = %d, lat_ns = %d)\n",
   17506 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17507 			return -1;
   17508 		}
   17509 	}
   17510 	/* Snoop and No-Snoop latencies the same */
   17511 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17512 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17513 
   17514 	/* Set OBFF high water mark */
   17515 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17516 	reg |= obff_hwm;
   17517 	CSR_WRITE(sc, WMREG_SVT, reg);
   17518 
   17519 	/* Enable OBFF */
   17520 	reg = CSR_READ(sc, WMREG_SVCR);
   17521 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17522 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17523 
   17524 	return 0;
   17525 }
   17526 
   17527 /*
   17528  * I210 Errata 25 and I211 Errata 10
   17529  * Slow System Clock.
   17530  *
    17531  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   17532  */
   17533 static int
   17534 wm_pll_workaround_i210(struct wm_softc *sc)
   17535 {
   17536 	uint32_t mdicnfg, wuc;
   17537 	uint32_t reg;
   17538 	pcireg_t pcireg;
   17539 	uint32_t pmreg;
   17540 	uint16_t nvmword, tmp_nvmword;
   17541 	uint16_t phyval;
   17542 	bool wa_done = false;
   17543 	int i, rv = 0;
   17544 
   17545 	/* Get Power Management cap offset */
   17546 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17547 	    &pmreg, NULL) == 0)
   17548 		return -1;
   17549 
   17550 	/* Save WUC and MDICNFG registers */
   17551 	wuc = CSR_READ(sc, WMREG_WUC);
   17552 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17553 
   17554 	reg = mdicnfg & ~MDICNFG_DEST;
   17555 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17556 
   17557 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17558 		/*
   17559 		 * The default value of the Initialization Control Word 1
   17560 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17561 		 */
   17562 		nvmword = INVM_DEFAULT_AL;
   17563 	}
   17564 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17565 
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK: the PLL is configured */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
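		/* Stage the autoload word with the workaround bit set. */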
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

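		/*
		 * Bounce the device through D3hot and back to D0 so
		 * that it restarts with the staged autoload word.
		 */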
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

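		/* Put back the original autoload word. */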
		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

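/*
 * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: ungate the
 * side clock and disable IOSF sideband clock gating and clock requests.
 */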
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
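/* Report the queue's current TDH (transmit descriptor head) register. */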
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

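/* Report the queue's current TDT (transmit descriptor tail) register. */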
static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
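/*
 * Read or update the driver's debug flags (sc_debug); on a write this
 * also dumps the TARC0 and TDT(0) registers for diagnosis.
 */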
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif