/*	$NetBSD: if_wm.c,v 1.776 2023/05/11 07:19:02 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multiqueue improvement (refine the queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.776 2023/05/11 07:19:02 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
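
/*
 * Illustrative call site (not compiled in): a message is emitted only
 * when the corresponding class bit is set in sc->sc_debug (adjustable
 * at run time via the WM_DEBUG sysctl handler below).  "sc" is assumed
 * to be a struct wm_softc * in scope.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif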

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver may use: one per queue,
 * plus one for link-state changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
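
/*
 * Minimal sketch (not compiled in) of the fallback described above,
 * assuming the usual locals (sc, txq, dmamap, m0) are in scope; the
 * real transmit path also retries and drops on repeated failure.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many DMA segments; compact the chain and retry. */
		WM_Q_EVCNT_INCR(txq, defrag);
		m0 = m_defrag(m0, M_DONTWAIT);
		if (m0 != NULL)
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
	}
#endif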
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
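
/*
 * Illustrative sketch (not compiled in): since txq_ndesc and txq_num
 * are powers of two, WM_NEXTTX()/WM_NEXTTXS() advance a ring index
 * with a mask instead of a modulo.  "nexttx", "seg" and "dmamap" are
 * hypothetical locals of a descriptor-fill loop:
 */
#if 0
	for (nexttx = txq->txq_next, seg = 0; seg < dmamap->dm_nsegs;
	     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
		/* Fill txq->txq_descs[nexttx] from dmamap->dm_segs[seg]. */
	}
#endif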

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet
 * (e.g. a 9018-byte frame spans ceil(9018 / 2048) = 5 buffers).  We
 * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
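
/*
 * Illustrative expansion (not compiled in): for queue 0,
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) becomes roughly
 * the following, yielding the counter name "txq00txdw":
 */
#if 0
	snprintf(q->txq_txdw_evcnt_name, sizeof(q->txq_txdw_evcnt_name),
	    "%s%02d%s", "txq", 0, "txdw");
	evcnt_attach_dynamic(&q->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
	    xname, q->txq_txdw_evcnt_name);
#endif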

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of one Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* actual number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of one Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* actual number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
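
/*
 * Illustrative sketch (not compiled in) of the locking discipline these
 * ops encode: acquire the PHY semaphore (the result must be checked,
 * per warn_unused_result), do the locked register access, and release.
 * "dev", "phy", "reg", "val" and "rv" are hypothetical locals.
 */
#if 0
	if (sc->phy.acquire(sc) != 0)
		return;			/* semaphore acquisition failed */
	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
	sc->phy.release(sc);
#endif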

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_sec;		/* Sequence Error */

	/* Old */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	/* New */
	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */

	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */

	/* Old */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	/* New */
	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */

	struct evcnt sc_ev_iac;		/* Interrupt Assertion */

	/* Old */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	/*
	 * sc_ev_rxdmtc is shared between the "Intr. Cause" and
	 * non-"Intr. Cause" registers.
	 */
	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	/* New */
	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
	struct evcnt sc_ev_lenerrs;	/* Length Error */
	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
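
/*
 * Illustrative sketch (not compiled in): rxq_tailp always points at
 * the slot where the next fragment pointer must be stored (initially
 * &rxq_head), so appending one Rx buffer to a multi-buffer packet is
 * O(1).  "m" and "len" are hypothetical locals of the Rx path.
 */
#if 0
	m->m_len = len;			/* length of this fragment */
	WM_RXCHAIN_LINK(rxq, m);	/* append it to the chain */
#endif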

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
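
/*
 * Illustrative call site (not compiled in).  On platforms with atomic
 * 64-bit load/store, the relaxed pair above keeps readers from seeing
 * a torn ev_count without the cost of a locked add; the updates
 * themselves are serialized by the per-queue locks.
 */
#if 0
	WM_Q_EVCNT_INCR(txq, txdw);	/* count one Tx descriptor intr */
#endif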

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
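
/*
 * Illustrative sketch (not compiled in): registers are typically kept
 * in prototype softc fields and written back read-modify-write style;
 * CSR_WRITE_FLUSH() forces the posted write out by reading STATUS.
 * CTRL_SLU ("set link up") is just an example bit from if_wmreg.h.
 */
#if 0
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
#endif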

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
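
/*
 * Illustrative sketch (not compiled in): the _LO/_HI halves split the
 * 64-bit DMA address of descriptor 0 so it can be programmed into the
 * 32-bit descriptor base registers; WM_CDTXADDR_HI() is simply 0 when
 * bus_addr_t is 32 bits.  The unqualified TDBAL/TDBAH names here are
 * the legacy (non-queue-indexed) register forms from if_wmreg.h.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
#endif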

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with/without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
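
/*
 * The corresponding kernel config(5) attachment is the usual
 * "wm* at pci? dev ? function ?" line (see e.g. the GENERIC kernels).
 */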

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1299 	  "Intel i82541ER 1000BASE-T Ethernet",
   1300 	  WM_T_82541_2,		WMP_F_COPPER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1303 	  "Intel i82541GI 1000BASE-T Ethernet",
   1304 	  WM_T_82541_2,		WMP_F_COPPER },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1307 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1308 	  WM_T_82541_2,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1311 	  "Intel i82541PI 1000BASE-T Ethernet",
   1312 	  WM_T_82541_2,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1315 	  "Intel i82547EI 1000BASE-T Ethernet",
   1316 	  WM_T_82547,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1319 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1320 	  WM_T_82547,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1323 	  "Intel i82547GI 1000BASE-T Ethernet",
   1324 	  WM_T_82547_2,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1327 	  "Intel PRO/1000 PT (82571EB)",
   1328 	  WM_T_82571,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1331 	  "Intel PRO/1000 PF (82571EB)",
   1332 	  WM_T_82571,		WMP_F_FIBER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1335 	  "Intel PRO/1000 PB (82571EB)",
   1336 	  WM_T_82571,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1339 	  "Intel PRO/1000 QT (82571EB)",
   1340 	  WM_T_82571,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1343 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1344 	  WM_T_82571,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1347 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1348 	  WM_T_82571,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1351 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82571,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1355 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1356 	  WM_T_82571,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1359 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1360 	  WM_T_82571,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1363 	  "Intel i82572EI 1000baseT Ethernet",
   1364 	  WM_T_82572,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1367 	  "Intel i82572EI 1000baseX Ethernet",
   1368 	  WM_T_82572,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1371 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1372 	  WM_T_82572,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1375 	  "Intel i82572EI 1000baseT Ethernet",
   1376 	  WM_T_82572,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1379 	  "Intel i82573E",
   1380 	  WM_T_82573,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1383 	  "Intel i82573E IAMT",
   1384 	  WM_T_82573,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1387 	  "Intel i82573L Gigabit Ethernet",
   1388 	  WM_T_82573,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1391 	  "Intel i82574L",
   1392 	  WM_T_82574,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1395 	  "Intel i82574L",
   1396 	  WM_T_82574,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1399 	  "Intel i82583V",
   1400 	  WM_T_82583,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1403 	  "i80003 dual 1000baseT Ethernet",
   1404 	  WM_T_80003,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1407 	  "i80003 dual 1000baseX Ethernet",
   1408 	  WM_T_80003,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1411 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1412 	  WM_T_80003,		WMP_F_SERDES },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1415 	  "Intel i80003 1000baseT Ethernet",
   1416 	  WM_T_80003,		WMP_F_COPPER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1419 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1420 	  WM_T_80003,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1423 	  "Intel i82801H (M_AMT) LAN Controller",
   1424 	  WM_T_ICH8,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1426 	  "Intel i82801H (AMT) LAN Controller",
   1427 	  WM_T_ICH8,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1429 	  "Intel i82801H LAN Controller",
   1430 	  WM_T_ICH8,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1432 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1433 	  WM_T_ICH8,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1435 	  "Intel i82801H (M) LAN Controller",
   1436 	  WM_T_ICH8,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1438 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1439 	  WM_T_ICH8,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1441 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1442 	  WM_T_ICH8,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1444 	  "82567V-3 LAN Controller",
   1445 	  WM_T_ICH8,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1447 	  "82801I (AMT) LAN Controller",
   1448 	  WM_T_ICH9,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1450 	  "82801I 10/100 LAN Controller",
   1451 	  WM_T_ICH9,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1453 	  "82801I (G) 10/100 LAN Controller",
   1454 	  WM_T_ICH9,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1456 	  "82801I (GT) 10/100 LAN Controller",
   1457 	  WM_T_ICH9,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1459 	  "82801I (C) LAN Controller",
   1460 	  WM_T_ICH9,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1462 	  "82801I mobile LAN Controller",
   1463 	  WM_T_ICH9,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1465 	  "82801I mobile (V) LAN Controller",
   1466 	  WM_T_ICH9,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1468 	  "82801I mobile (AMT) LAN Controller",
   1469 	  WM_T_ICH9,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1471 	  "82567LM-4 LAN Controller",
   1472 	  WM_T_ICH9,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1474 	  "82567LM-2 LAN Controller",
   1475 	  WM_T_ICH10,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1477 	  "82567LF-2 LAN Controller",
   1478 	  WM_T_ICH10,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1480 	  "82567LM-3 LAN Controller",
   1481 	  WM_T_ICH10,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1483 	  "82567LF-3 LAN Controller",
   1484 	  WM_T_ICH10,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1486 	  "82567V-2 LAN Controller",
   1487 	  WM_T_ICH10,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1489 	  "82567V-3? LAN Controller",
   1490 	  WM_T_ICH10,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1492 	  "HANKSVILLE LAN Controller",
   1493 	  WM_T_ICH10,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1495 	  "PCH LAN (82577LM) Controller",
   1496 	  WM_T_PCH,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1498 	  "PCH LAN (82577LC) Controller",
   1499 	  WM_T_PCH,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1501 	  "PCH LAN (82578DM) Controller",
   1502 	  WM_T_PCH,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1504 	  "PCH LAN (82578DC) Controller",
   1505 	  WM_T_PCH,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1507 	  "PCH2 LAN (82579LM) Controller",
   1508 	  WM_T_PCH2,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1510 	  "PCH2 LAN (82579V) Controller",
   1511 	  WM_T_PCH2,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1513 	  "82575EB dual-1000baseT Ethernet",
   1514 	  WM_T_82575,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1516 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1517 	  WM_T_82575,		WMP_F_SERDES },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1519 	  "82575GB quad-1000baseT Ethernet",
   1520 	  WM_T_82575,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1522 	  "82575GB quad-1000baseT Ethernet (PM)",
   1523 	  WM_T_82575,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1525 	  "82576 1000BaseT Ethernet",
   1526 	  WM_T_82576,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1528 	  "82576 1000BaseX Ethernet",
   1529 	  WM_T_82576,		WMP_F_FIBER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1532 	  "82576 gigabit Ethernet (SERDES)",
   1533 	  WM_T_82576,		WMP_F_SERDES },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1536 	  "82576 quad-1000BaseT Ethernet",
   1537 	  WM_T_82576,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1540 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1541 	  WM_T_82576,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1544 	  "82576 gigabit Ethernet",
   1545 	  WM_T_82576,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1548 	  "82576 gigabit Ethernet (SERDES)",
   1549 	  WM_T_82576,		WMP_F_SERDES },
   1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1551 	  "82576 quad-gigabit Ethernet (SERDES)",
   1552 	  WM_T_82576,		WMP_F_SERDES },
   1553 
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1555 	  "82580 1000BaseT Ethernet",
   1556 	  WM_T_82580,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1558 	  "82580 1000BaseX Ethernet",
   1559 	  WM_T_82580,		WMP_F_FIBER },
   1560 
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1562 	  "82580 1000BaseT Ethernet (SERDES)",
   1563 	  WM_T_82580,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1566 	  "82580 gigabit Ethernet (SGMII)",
   1567 	  WM_T_82580,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1569 	  "82580 dual-1000BaseT Ethernet",
   1570 	  WM_T_82580,		WMP_F_COPPER },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1573 	  "82580 quad-1000BaseX Ethernet",
   1574 	  WM_T_82580,		WMP_F_FIBER },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1577 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1578 	  WM_T_82580,		WMP_F_COPPER },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1581 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1582 	  WM_T_82580,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1585 	  "DH89XXCC 1000BASE-KX Ethernet",
   1586 	  WM_T_82580,		WMP_F_SERDES },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1589 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1590 	  WM_T_82580,		WMP_F_SERDES },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1593 	  "I350 Gigabit Network Connection",
   1594 	  WM_T_I350,		WMP_F_COPPER },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1597 	  "I350 Gigabit Fiber Network Connection",
   1598 	  WM_T_I350,		WMP_F_FIBER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1601 	  "I350 Gigabit Backplane Connection",
   1602 	  WM_T_I350,		WMP_F_SERDES },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1605 	  "I350 Quad Port Gigabit Ethernet",
   1606 	  WM_T_I350,		WMP_F_SERDES },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1609 	  "I350 Gigabit Connection",
   1610 	  WM_T_I350,		WMP_F_COPPER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1613 	  "I354 Gigabit Ethernet (KX)",
   1614 	  WM_T_I354,		WMP_F_SERDES },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1617 	  "I354 Gigabit Ethernet (SGMII)",
   1618 	  WM_T_I354,		WMP_F_COPPER },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1621 	  "I354 Gigabit Ethernet (2.5G)",
   1622 	  WM_T_I354,		WMP_F_COPPER },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1625 	  "I210-T1 Ethernet Server Adapter",
   1626 	  WM_T_I210,		WMP_F_COPPER },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1629 	  "I210 Ethernet (Copper OEM)",
   1630 	  WM_T_I210,		WMP_F_COPPER },
   1631 
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1633 	  "I210 Ethernet (Copper IT)",
   1634 	  WM_T_I210,		WMP_F_COPPER },
   1635 
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1637 	  "I210 Ethernet (Copper, FLASH less)",
   1638 	  WM_T_I210,		WMP_F_COPPER },
   1639 
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1641 	  "I210 Gigabit Ethernet (Fiber)",
   1642 	  WM_T_I210,		WMP_F_FIBER },
   1643 
   1644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1645 	  "I210 Gigabit Ethernet (SERDES)",
   1646 	  WM_T_I210,		WMP_F_SERDES },
   1647 
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1649 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1650 	  WM_T_I210,		WMP_F_SERDES },
   1651 
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1653 	  "I210 Gigabit Ethernet (SGMII)",
   1654 	  WM_T_I210,		WMP_F_COPPER },
   1655 
   1656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1657 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1658 	  WM_T_I210,		WMP_F_COPPER },
   1659 
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1661 	  "I211 Ethernet (COPPER)",
   1662 	  WM_T_I211,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1664 	  "I217 V Ethernet Connection",
   1665 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1667 	  "I217 LM Ethernet Connection",
   1668 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1670 	  "I218 V Ethernet Connection",
   1671 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1673 	  "I218 V Ethernet Connection",
   1674 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1676 	  "I218 V Ethernet Connection",
   1677 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1679 	  "I218 LM Ethernet Connection",
   1680 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1682 	  "I218 LM Ethernet Connection",
   1683 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1685 	  "I218 LM Ethernet Connection",
   1686 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1688 	  "I219 LM Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1691 	  "I219 LM (2) Ethernet Connection",
   1692 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1694 	  "I219 LM (3) Ethernet Connection",
   1695 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1697 	  "I219 LM (4) Ethernet Connection",
   1698 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1700 	  "I219 LM (5) Ethernet Connection",
   1701 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1703 	  "I219 LM (6) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1706 	  "I219 LM (7) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1709 	  "I219 LM (8) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1712 	  "I219 LM (9) Ethernet Connection",
   1713 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1715 	  "I219 LM (10) Ethernet Connection",
   1716 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1718 	  "I219 LM (11) Ethernet Connection",
   1719 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1721 	  "I219 LM (12) Ethernet Connection",
   1722 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1723 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1724 	  "I219 LM (13) Ethernet Connection",
   1725 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1727 	  "I219 LM (14) Ethernet Connection",
   1728 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1730 	  "I219 LM (15) Ethernet Connection",
   1731 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1733 	  "I219 LM (16) Ethernet Connection",
   1734 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1735 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1736 	  "I219 LM (17) Ethernet Connection",
   1737 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1739 	  "I219 LM (18) Ethernet Connection",
   1740 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1742 	  "I219 LM (19) Ethernet Connection",
   1743 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1745 	  "I219 V Ethernet Connection",
   1746 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1747 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1748 	  "I219 V (2) Ethernet Connection",
   1749 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1751 	  "I219 V (4) Ethernet Connection",
   1752 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1754 	  "I219 V (5) Ethernet Connection",
   1755 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1757 	  "I219 V (6) Ethernet Connection",
   1758 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1759 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1760 	  "I219 V (7) Ethernet Connection",
   1761 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1763 	  "I219 V (8) Ethernet Connection",
   1764 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1766 	  "I219 V (9) Ethernet Connection",
   1767 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1769 	  "I219 V (10) Ethernet Connection",
   1770 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1771 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1772 	  "I219 V (11) Ethernet Connection",
   1773 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1774 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1775 	  "I219 V (12) Ethernet Connection",
   1776 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1778 	  "I219 V (13) Ethernet Connection",
   1779 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1780 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1781 	  "I219 V (14) Ethernet Connection",
   1782 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1783 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1784 	  "I219 V (15) Ethernet Connection",
   1785 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1787 	  "I219 V (16) Ethernet Connection",
   1788 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1790 	  "I219 V (17) Ethernet Connection",
   1791 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1793 	  "I219 V (18) Ethernet Connection",
   1794 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1795 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1796 	  "I219 V (19) Ethernet Connection",
   1797 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1798 	{ 0,			0,
   1799 	  NULL,
   1800 	  0,			0 },
   1801 };
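/*
 * The media type of each entry lives in the low two bits of
 * wmp_flags, so WMP_MEDIATYPE(wmp->wmp_flags) recovers one of the
 * WM_MEDIATYPE_* values; e.g. for an entry declared with
 * WMP_F_SERDES, wm_attach() below stores WM_MEDIATYPE_SERDES in
 * sc_mediatype.
 */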
   1802 
    1803 /*
    1804  * Register read/write functions,
    1805  * other than CSR_{READ|WRITE}().
    1806  */
   1807 
   1808 #if 0 /* Not currently used */
   1809 static inline uint32_t
   1810 wm_io_read(struct wm_softc *sc, int reg)
   1811 {
   1812 
   1813 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1814 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1815 }
   1816 #endif
   1817 
   1818 static inline void
   1819 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1820 {
   1821 
   1822 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1823 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1824 }
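/*
 * The pair above implements the i8254x I/O-mapped indirect access
 * window: offset 0 in I/O space holds the register address and
 * offset 4 the data, so every access costs two bus_space operations.
 * An illustrative call (valid only when WM_F_IOH_VALID is set, i.e.
 * the I/O BAR was mapped):
 *
 *	wm_io_write(sc, WMREG_CTRL, ctrl);
 *
 * has the same effect as CSR_WRITE(sc, WMREG_CTRL, ctrl) through the
 * memory BAR; it is kept only for chip-bug workarounds.
 */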
   1825 
   1826 static inline void
   1827 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1828     uint32_t data)
   1829 {
   1830 	uint32_t regval;
   1831 	int i;
   1832 
   1833 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1834 
   1835 	CSR_WRITE(sc, reg, regval);
   1836 
   1837 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1838 		delay(5);
   1839 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1840 			break;
   1841 	}
   1842 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1843 		aprint_error("%s: WARNING:"
   1844 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1845 		    device_xname(sc->sc_dev), reg);
   1846 	}
   1847 }
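/*
 * The helper above packs an 8-bit value and a register offset into a
 * single CSR write and then polls SCTL_CTL_READY every 5us, giving a
 * worst-case wait of 5 * SCTL_CTL_POLL_TIMEOUT microseconds before
 * it warns and carries on without the ready indication.
 */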
   1848 
   1849 static inline void
   1850 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1851 {
   1852 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1853 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1854 }
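/*
 * wm_set_dma_addr() splits a bus address into the two little-endian
 * 32-bit halves of a wiseman descriptor address.  A minimal sketch
 * of the same operation, assuming a 64-bit bus_addr_t:
 *
 *	wa->wa_low  = htole32((uint32_t)(v & 0xffffffffU));
 *	wa->wa_high = htole32((uint32_t)(v >> 32));
 *
 * The BUS_ADDR_{LO,HI}32() macros do this portably; on machines where
 * bus_addr_t is 32 bits wide, wa_high simply becomes 0.
 */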
   1855 
   1856 /*
   1857  * Descriptor sync/init functions.
   1858  */
   1859 static inline void
   1860 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1861 {
   1862 	struct wm_softc *sc = txq->txq_sc;
   1863 
   1864 	/* If it will wrap around, sync to the end of the ring. */
   1865 	if ((start + num) > WM_NTXDESC(txq)) {
   1866 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1867 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1868 		    (WM_NTXDESC(txq) - start), ops);
   1869 		num -= (WM_NTXDESC(txq) - start);
   1870 		start = 0;
   1871 	}
   1872 
   1873 	/* Now sync whatever is left. */
   1874 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1875 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1876 }
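/*
 * A worked example of the wrap handling above: with
 * WM_NTXDESC(txq) == 256, calling wm_cdtxsync(txq, 250, 10, ops)
 * first syncs descriptors 250..255 (6 of them), then falls through
 * to sync 0..3 (the remaining 4), so a range crossing the end of the
 * ring costs two bus_dmamap_sync() calls.
 */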
   1877 
   1878 static inline void
   1879 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1880 {
   1881 	struct wm_softc *sc = rxq->rxq_sc;
   1882 
   1883 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1884 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1885 }
   1886 
   1887 static inline void
   1888 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1889 {
   1890 	struct wm_softc *sc = rxq->rxq_sc;
   1891 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1892 	struct mbuf *m = rxs->rxs_mbuf;
   1893 
   1894 	/*
   1895 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1896 	 * so that the payload after the Ethernet header is aligned
   1897 	 * to a 4-byte boundary.
    1898 	 *
   1899 	 * XXX BRAINDAMAGE ALERT!
   1900 	 * The stupid chip uses the same size for every buffer, which
   1901 	 * is set in the Receive Control register.  We are using the 2K
   1902 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1903 	 * reason, we can't "scoot" packets longer than the standard
   1904 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1905 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1906 	 * the upper layer copy the headers.
   1907 	 */
   1908 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1909 
   1910 	if (sc->sc_type == WM_T_82574) {
   1911 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1912 		rxd->erx_data.erxd_addr =
   1913 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1914 		rxd->erx_data.erxd_dd = 0;
   1915 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1916 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1917 
   1918 		rxd->nqrx_data.nrxd_paddr =
   1919 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1920 		/* Currently, split header is not supported. */
   1921 		rxd->nqrx_data.nrxd_haddr = 0;
   1922 	} else {
   1923 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1924 
   1925 		wm_set_dma_addr(&rxd->wrx_addr,
   1926 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1927 		rxd->wrx_len = 0;
   1928 		rxd->wrx_cksum = 0;
   1929 		rxd->wrx_status = 0;
   1930 		rxd->wrx_errors = 0;
   1931 		rxd->wrx_special = 0;
   1932 	}
   1933 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1934 
   1935 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1936 }
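/*
 * Concretely, sc_align_tweak is either 2 (normal frames) or 0 (large
 * frames on strict-alignment machines, where 2K - 2 would be too
 * small).  With the tweak of 2, a received frame starts at
 * ext_buf + 2, so the payload after the 14-byte Ethernet header sits
 * at offset 14 + 2 = 16, which is 4-byte aligned.
 */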
   1937 
   1938 /*
   1939  * Device driver interface functions and commonly used functions.
   1940  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1941  */
   1942 
   1943 /* Lookup supported device table */
   1944 static const struct wm_product *
   1945 wm_lookup(const struct pci_attach_args *pa)
   1946 {
   1947 	const struct wm_product *wmp;
   1948 
   1949 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1950 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1951 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1952 			return wmp;
   1953 	}
   1954 	return NULL;
   1955 }
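/*
 * wm_products[] ends with an all-zero sentinel, so the scan above
 * stops at the first entry whose wmp_name is NULL.  For example, a
 * device whose pa_id decodes to PCI_VENDOR_INTEL /
 * PCI_PRODUCT_INTEL_82574L matches the WM_T_82574 entry.
 */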
   1956 
   1957 /* The match function (ca_match) */
   1958 static int
   1959 wm_match(device_t parent, cfdata_t cf, void *aux)
   1960 {
   1961 	struct pci_attach_args *pa = aux;
   1962 
   1963 	if (wm_lookup(pa) != NULL)
   1964 		return 1;
   1965 
   1966 	return 0;
   1967 }
   1968 
   1969 /* The attach function (ca_attach) */
   1970 static void
   1971 wm_attach(device_t parent, device_t self, void *aux)
   1972 {
   1973 	struct wm_softc *sc = device_private(self);
   1974 	struct pci_attach_args *pa = aux;
   1975 	prop_dictionary_t dict;
   1976 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1977 	pci_chipset_tag_t pc = pa->pa_pc;
   1978 	int counts[PCI_INTR_TYPE_SIZE];
   1979 	pci_intr_type_t max_type;
   1980 	const char *eetype, *xname;
   1981 	bus_space_tag_t memt;
   1982 	bus_space_handle_t memh;
   1983 	bus_size_t memsize;
   1984 	int memh_valid;
   1985 	int i, error;
   1986 	const struct wm_product *wmp;
   1987 	prop_data_t ea;
   1988 	prop_number_t pn;
   1989 	uint8_t enaddr[ETHER_ADDR_LEN];
   1990 	char buf[256];
   1991 	char wqname[MAXCOMLEN];
   1992 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1993 	pcireg_t preg, memtype;
   1994 	uint16_t eeprom_data, apme_mask;
   1995 	bool force_clear_smbi;
   1996 	uint32_t link_mode;
   1997 	uint32_t reg;
   1998 
   1999 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   2000 	sc->sc_debug = WM_DEBUG_DEFAULT;
   2001 #endif
   2002 	sc->sc_dev = self;
   2003 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
   2004 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   2005 	sc->sc_core_stopping = false;
   2006 
   2007 	wmp = wm_lookup(pa);
   2008 #ifdef DIAGNOSTIC
   2009 	if (wmp == NULL) {
   2010 		printf("\n");
   2011 		panic("wm_attach: impossible");
   2012 	}
   2013 #endif
   2014 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   2015 
   2016 	sc->sc_pc = pa->pa_pc;
   2017 	sc->sc_pcitag = pa->pa_tag;
   2018 
   2019 	if (pci_dma64_available(pa)) {
   2020 		aprint_verbose(", 64-bit DMA");
   2021 		sc->sc_dmat = pa->pa_dmat64;
   2022 	} else {
   2023 		aprint_verbose(", 32-bit DMA");
   2024 		sc->sc_dmat = pa->pa_dmat;
   2025 	}
   2026 
   2027 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2028 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2029 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2030 
   2031 	sc->sc_type = wmp->wmp_type;
   2032 
   2033 	/* Set default function pointers */
   2034 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2035 	sc->phy.release = sc->nvm.release = wm_put_null;
   2036 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2037 
   2038 	if (sc->sc_type < WM_T_82543) {
   2039 		if (sc->sc_rev < 2) {
   2040 			aprint_error_dev(sc->sc_dev,
   2041 			    "i82542 must be at least rev. 2\n");
   2042 			return;
   2043 		}
   2044 		if (sc->sc_rev < 3)
   2045 			sc->sc_type = WM_T_82542_2_0;
   2046 	}
   2047 
   2048 	/*
   2049 	 * Disable MSI for Errata:
   2050 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2051 	 *
   2052 	 *  82544: Errata 25
   2053 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2054 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2055 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2056 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2057 	 *
   2058 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2059 	 *
   2060 	 *  82571 & 82572: Errata 63
   2061 	 */
   2062 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2063 	    || (sc->sc_type == WM_T_82572))
   2064 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2065 
   2066 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2067 	    || (sc->sc_type == WM_T_82580)
   2068 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2069 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2070 		sc->sc_flags |= WM_F_NEWQUEUE;
   2071 
   2072 	/* Set device properties (mactype) */
   2073 	dict = device_properties(sc->sc_dev);
   2074 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2075 
   2076 	/*
    2077 	 * Map the device.  All devices support memory-mapped access,
   2078 	 * and it is really required for normal operation.
   2079 	 */
   2080 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2081 	switch (memtype) {
   2082 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2083 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2084 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2085 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2086 		break;
   2087 	default:
   2088 		memh_valid = 0;
   2089 		break;
   2090 	}
   2091 
   2092 	if (memh_valid) {
   2093 		sc->sc_st = memt;
   2094 		sc->sc_sh = memh;
   2095 		sc->sc_ss = memsize;
   2096 	} else {
   2097 		aprint_error_dev(sc->sc_dev,
   2098 		    "unable to map device registers\n");
   2099 		return;
   2100 	}
   2101 
   2102 	/*
   2103 	 * In addition, i82544 and later support I/O mapped indirect
   2104 	 * register access.  It is not desirable (nor supported in
   2105 	 * this driver) to use it for normal operation, though it is
   2106 	 * required to work around bugs in some chip versions.
   2107 	 */
   2108 	switch (sc->sc_type) {
   2109 	case WM_T_82544:
   2110 	case WM_T_82541:
   2111 	case WM_T_82541_2:
   2112 	case WM_T_82547:
   2113 	case WM_T_82547_2:
   2114 		/* First we have to find the I/O BAR. */
   2115 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2116 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2117 			if (memtype == PCI_MAPREG_TYPE_IO)
   2118 				break;
   2119 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2120 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2121 				i += 4;	/* skip high bits, too */
   2122 		}
   2123 		if (i < PCI_MAPREG_END) {
    2124 			/*
    2125 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2126 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2127 			 * That's no problem, because newer chips don't
    2128 			 * have this bug.
    2129 			 *
    2130 			 * The i8254x apparently doesn't respond when the
    2131 			 * I/O BAR is 0, which looks as if it hasn't
    2132 			 * been configured.
    2133 			 */
   2134 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2135 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2136 				aprint_error_dev(sc->sc_dev,
   2137 				    "WARNING: I/O BAR at zero.\n");
   2138 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2139 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2140 			    == 0) {
   2141 				sc->sc_flags |= WM_F_IOH_VALID;
   2142 			} else
   2143 				aprint_error_dev(sc->sc_dev,
   2144 				    "WARNING: unable to map I/O space\n");
   2145 		}
   2146 		break;
   2147 	default:
   2148 		break;
   2149 	}
   2150 
   2151 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2152 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2153 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2154 	if (sc->sc_type < WM_T_82542_2_1)
   2155 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2156 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2157 
   2158 	/* Power up chip */
   2159 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2160 	    && error != EOPNOTSUPP) {
   2161 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2162 		return;
   2163 	}
   2164 
   2165 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
    2166 	/*
    2167 	 * If we can use only one queue, don't use MSI-X, to save
    2168 	 * interrupt resources.
    2169 	 */
   2170 	if (sc->sc_nqueues > 1) {
   2171 		max_type = PCI_INTR_TYPE_MSIX;
    2172 		/*
    2173 		 * The 82583 has an MSI-X capability in its PCI configuration
    2174 		 * space but doesn't actually support it; at least the
    2175 		 * documentation says nothing about MSI-X.
    2176 		 */
   2177 		counts[PCI_INTR_TYPE_MSIX]
   2178 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2179 	} else {
   2180 		max_type = PCI_INTR_TYPE_MSI;
   2181 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2182 	}
   2183 
   2184 	/* Allocation settings */
   2185 	counts[PCI_INTR_TYPE_MSI] = 1;
   2186 	counts[PCI_INTR_TYPE_INTX] = 1;
   2187 	/* overridden by disable flags */
   2188 	if (wm_disable_msi != 0) {
   2189 		counts[PCI_INTR_TYPE_MSI] = 0;
   2190 		if (wm_disable_msix != 0) {
   2191 			max_type = PCI_INTR_TYPE_INTX;
   2192 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2193 		}
   2194 	} else if (wm_disable_msix != 0) {
   2195 		max_type = PCI_INTR_TYPE_MSI;
   2196 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2197 	}
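	/*
	 * To summarize the overrides above: wm_disable_msi alone zeroes
	 * the MSI count (MSI-X may still be chosen); wm_disable_msix
	 * alone caps max_type at MSI; setting both forces plain INTx.
	 */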
   2198 
   2199 alloc_retry:
   2200 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2201 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2202 		return;
   2203 	}
   2204 
   2205 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2206 		error = wm_setup_msix(sc);
   2207 		if (error) {
   2208 			pci_intr_release(pc, sc->sc_intrs,
   2209 			    counts[PCI_INTR_TYPE_MSIX]);
   2210 
   2211 			/* Setup for MSI: Disable MSI-X */
   2212 			max_type = PCI_INTR_TYPE_MSI;
   2213 			counts[PCI_INTR_TYPE_MSI] = 1;
   2214 			counts[PCI_INTR_TYPE_INTX] = 1;
   2215 			goto alloc_retry;
   2216 		}
   2217 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2218 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2219 		error = wm_setup_legacy(sc);
   2220 		if (error) {
   2221 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2222 			    counts[PCI_INTR_TYPE_MSI]);
   2223 
   2224 			/* The next try is for INTx: Disable MSI */
   2225 			max_type = PCI_INTR_TYPE_INTX;
   2226 			counts[PCI_INTR_TYPE_INTX] = 1;
   2227 			goto alloc_retry;
   2228 		}
   2229 	} else {
   2230 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2231 		error = wm_setup_legacy(sc);
   2232 		if (error) {
   2233 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2234 			    counts[PCI_INTR_TYPE_INTX]);
   2235 			return;
   2236 		}
   2237 	}
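	/*
	 * The ladder above degrades step by step: if MSI-X allocation or
	 * wm_setup_msix() fails, the vectors are released and we retry
	 * with max_type = PCI_INTR_TYPE_MSI; if MSI then fails in
	 * wm_setup_legacy(), we retry once more with INTx; an INTx
	 * failure ends the attach.
	 */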
   2238 
   2239 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2240 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2241 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2242 	    WQ_PERCPU | WQ_MPSAFE);
   2243 	if (error) {
   2244 		aprint_error_dev(sc->sc_dev,
   2245 		    "unable to create TxRx workqueue\n");
   2246 		goto out;
   2247 	}
   2248 
   2249 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
   2250 	error = workqueue_create(&sc->sc_reset_wq, wqname,
   2251 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
   2252 	    WQ_MPSAFE);
   2253 	if (error) {
   2254 		workqueue_destroy(sc->sc_queue_wq);
   2255 		aprint_error_dev(sc->sc_dev,
   2256 		    "unable to create reset workqueue\n");
   2257 		goto out;
   2258 	}
   2259 
   2260 	/*
   2261 	 * Check the function ID (unit number of the chip).
   2262 	 */
   2263 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2264 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2265 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2266 	    || (sc->sc_type == WM_T_82580)
   2267 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2268 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2269 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2270 	else
   2271 		sc->sc_funcid = 0;
   2272 
   2273 	/*
   2274 	 * Determine a few things about the bus we're connected to.
   2275 	 */
   2276 	if (sc->sc_type < WM_T_82543) {
   2277 		/* We don't really know the bus characteristics here. */
   2278 		sc->sc_bus_speed = 33;
   2279 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    2280 		/*
    2281 		 * CSA (Communication Streaming Architecture) is about as
    2282 		 * fast as a 32-bit 66MHz PCI bus.
    2283 		 */
   2284 		sc->sc_flags |= WM_F_CSA;
   2285 		sc->sc_bus_speed = 66;
   2286 		aprint_verbose_dev(sc->sc_dev,
   2287 		    "Communication Streaming Architecture\n");
   2288 		if (sc->sc_type == WM_T_82547) {
   2289 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
   2290 			callout_setfunc(&sc->sc_txfifo_ch,
   2291 			    wm_82547_txfifo_stall, sc);
   2292 			aprint_verbose_dev(sc->sc_dev,
   2293 			    "using 82547 Tx FIFO stall work-around\n");
   2294 		}
   2295 	} else if (sc->sc_type >= WM_T_82571) {
   2296 		sc->sc_flags |= WM_F_PCIE;
   2297 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2298 		    && (sc->sc_type != WM_T_ICH10)
   2299 		    && (sc->sc_type != WM_T_PCH)
   2300 		    && (sc->sc_type != WM_T_PCH2)
   2301 		    && (sc->sc_type != WM_T_PCH_LPT)
   2302 		    && (sc->sc_type != WM_T_PCH_SPT)
   2303 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2304 			/* ICH* and PCH* have no PCIe capability registers */
   2305 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2306 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2307 				NULL) == 0)
   2308 				aprint_error_dev(sc->sc_dev,
   2309 				    "unable to find PCIe capability\n");
   2310 		}
   2311 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2312 	} else {
   2313 		reg = CSR_READ(sc, WMREG_STATUS);
   2314 		if (reg & STATUS_BUS64)
   2315 			sc->sc_flags |= WM_F_BUS64;
   2316 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2317 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2318 
   2319 			sc->sc_flags |= WM_F_PCIX;
   2320 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2321 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2322 				aprint_error_dev(sc->sc_dev,
   2323 				    "unable to find PCIX capability\n");
   2324 			else if (sc->sc_type != WM_T_82545_3 &&
   2325 			    sc->sc_type != WM_T_82546_3) {
   2326 				/*
   2327 				 * Work around a problem caused by the BIOS
   2328 				 * setting the max memory read byte count
   2329 				 * incorrectly.
   2330 				 */
   2331 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2332 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2333 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2334 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2335 
   2336 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2337 				    PCIX_CMD_BYTECNT_SHIFT;
   2338 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2339 				    PCIX_STATUS_MAXB_SHIFT;
   2340 				if (bytecnt > maxb) {
   2341 					aprint_verbose_dev(sc->sc_dev,
   2342 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2343 					    512 << bytecnt, 512 << maxb);
   2344 					pcix_cmd = (pcix_cmd &
   2345 					    ~PCIX_CMD_BYTECNT_MASK) |
   2346 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2347 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2348 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2349 					    pcix_cmd);
   2350 				}
   2351 			}
   2352 		}
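		/*
		 * A worked example of the MMRBC clamp above: the 2-bit
		 * fields encode 512 << n bytes, so a BIOS-set bytecnt of 3
		 * (4096 bytes) with a device maxb of 1 (1024 bytes) is
		 * rewritten to 1024 and reported as "resetting PCI-X
		 * MMRBC: 4096 -> 1024".
		 */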
   2353 		/*
   2354 		 * The quad port adapter is special; it has a PCIX-PCIX
   2355 		 * bridge on the board, and can run the secondary bus at
   2356 		 * a higher speed.
   2357 		 */
   2358 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2359 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2360 								      : 66;
   2361 		} else if (sc->sc_flags & WM_F_PCIX) {
   2362 			switch (reg & STATUS_PCIXSPD_MASK) {
   2363 			case STATUS_PCIXSPD_50_66:
   2364 				sc->sc_bus_speed = 66;
   2365 				break;
   2366 			case STATUS_PCIXSPD_66_100:
   2367 				sc->sc_bus_speed = 100;
   2368 				break;
   2369 			case STATUS_PCIXSPD_100_133:
   2370 				sc->sc_bus_speed = 133;
   2371 				break;
   2372 			default:
   2373 				aprint_error_dev(sc->sc_dev,
   2374 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2375 				    reg & STATUS_PCIXSPD_MASK);
   2376 				sc->sc_bus_speed = 66;
   2377 				break;
   2378 			}
   2379 		} else
   2380 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2381 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2382 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2383 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2384 	}
   2385 
   2386 	/* clear interesting stat counters */
   2387 	CSR_READ(sc, WMREG_COLC);
   2388 	CSR_READ(sc, WMREG_RXERRC);
   2389 
   2390 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2391 	    || (sc->sc_type >= WM_T_ICH8))
   2392 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2393 	if (sc->sc_type >= WM_T_ICH8)
   2394 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2395 
   2396 	/* Set PHY, NVM mutex related stuff */
   2397 	switch (sc->sc_type) {
   2398 	case WM_T_82542_2_0:
   2399 	case WM_T_82542_2_1:
   2400 	case WM_T_82543:
   2401 	case WM_T_82544:
   2402 		/* Microwire */
   2403 		sc->nvm.read = wm_nvm_read_uwire;
   2404 		sc->sc_nvm_wordsize = 64;
   2405 		sc->sc_nvm_addrbits = 6;
   2406 		break;
   2407 	case WM_T_82540:
   2408 	case WM_T_82545:
   2409 	case WM_T_82545_3:
   2410 	case WM_T_82546:
   2411 	case WM_T_82546_3:
   2412 		/* Microwire */
   2413 		sc->nvm.read = wm_nvm_read_uwire;
   2414 		reg = CSR_READ(sc, WMREG_EECD);
   2415 		if (reg & EECD_EE_SIZE) {
   2416 			sc->sc_nvm_wordsize = 256;
   2417 			sc->sc_nvm_addrbits = 8;
   2418 		} else {
   2419 			sc->sc_nvm_wordsize = 64;
   2420 			sc->sc_nvm_addrbits = 6;
   2421 		}
   2422 		sc->sc_flags |= WM_F_LOCK_EECD;
   2423 		sc->nvm.acquire = wm_get_eecd;
   2424 		sc->nvm.release = wm_put_eecd;
   2425 		break;
   2426 	case WM_T_82541:
   2427 	case WM_T_82541_2:
   2428 	case WM_T_82547:
   2429 	case WM_T_82547_2:
   2430 		reg = CSR_READ(sc, WMREG_EECD);
    2431 		/*
    2432 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2433 		 * 8254[17], so set the flags and functions before calling it.
    2434 		 */
   2435 		sc->sc_flags |= WM_F_LOCK_EECD;
   2436 		sc->nvm.acquire = wm_get_eecd;
   2437 		sc->nvm.release = wm_put_eecd;
   2438 		if (reg & EECD_EE_TYPE) {
   2439 			/* SPI */
   2440 			sc->nvm.read = wm_nvm_read_spi;
   2441 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2442 			wm_nvm_set_addrbits_size_eecd(sc);
   2443 		} else {
   2444 			/* Microwire */
   2445 			sc->nvm.read = wm_nvm_read_uwire;
   2446 			if ((reg & EECD_EE_ABITS) != 0) {
   2447 				sc->sc_nvm_wordsize = 256;
   2448 				sc->sc_nvm_addrbits = 8;
   2449 			} else {
   2450 				sc->sc_nvm_wordsize = 64;
   2451 				sc->sc_nvm_addrbits = 6;
   2452 			}
   2453 		}
   2454 		break;
   2455 	case WM_T_82571:
   2456 	case WM_T_82572:
   2457 		/* SPI */
   2458 		sc->nvm.read = wm_nvm_read_eerd;
    2459 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2460 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2461 		wm_nvm_set_addrbits_size_eecd(sc);
   2462 		sc->phy.acquire = wm_get_swsm_semaphore;
   2463 		sc->phy.release = wm_put_swsm_semaphore;
   2464 		sc->nvm.acquire = wm_get_nvm_82571;
   2465 		sc->nvm.release = wm_put_nvm_82571;
   2466 		break;
   2467 	case WM_T_82573:
   2468 	case WM_T_82574:
   2469 	case WM_T_82583:
   2470 		sc->nvm.read = wm_nvm_read_eerd;
    2471 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2472 		if (sc->sc_type == WM_T_82573) {
   2473 			sc->phy.acquire = wm_get_swsm_semaphore;
   2474 			sc->phy.release = wm_put_swsm_semaphore;
   2475 			sc->nvm.acquire = wm_get_nvm_82571;
   2476 			sc->nvm.release = wm_put_nvm_82571;
   2477 		} else {
   2478 			/* Both PHY and NVM use the same semaphore. */
   2479 			sc->phy.acquire = sc->nvm.acquire
   2480 			    = wm_get_swfwhw_semaphore;
   2481 			sc->phy.release = sc->nvm.release
   2482 			    = wm_put_swfwhw_semaphore;
   2483 		}
   2484 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2485 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2486 			sc->sc_nvm_wordsize = 2048;
   2487 		} else {
   2488 			/* SPI */
   2489 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2490 			wm_nvm_set_addrbits_size_eecd(sc);
   2491 		}
   2492 		break;
   2493 	case WM_T_82575:
   2494 	case WM_T_82576:
   2495 	case WM_T_82580:
   2496 	case WM_T_I350:
   2497 	case WM_T_I354:
   2498 	case WM_T_80003:
   2499 		/* SPI */
   2500 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2501 		wm_nvm_set_addrbits_size_eecd(sc);
   2502 		if ((sc->sc_type == WM_T_80003)
   2503 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2504 			sc->nvm.read = wm_nvm_read_eerd;
   2505 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2506 		} else {
   2507 			sc->nvm.read = wm_nvm_read_spi;
   2508 			sc->sc_flags |= WM_F_LOCK_EECD;
   2509 		}
   2510 		sc->phy.acquire = wm_get_phy_82575;
   2511 		sc->phy.release = wm_put_phy_82575;
   2512 		sc->nvm.acquire = wm_get_nvm_80003;
   2513 		sc->nvm.release = wm_put_nvm_80003;
   2514 		break;
   2515 	case WM_T_ICH8:
   2516 	case WM_T_ICH9:
   2517 	case WM_T_ICH10:
   2518 	case WM_T_PCH:
   2519 	case WM_T_PCH2:
   2520 	case WM_T_PCH_LPT:
   2521 		sc->nvm.read = wm_nvm_read_ich8;
   2522 		/* FLASH */
   2523 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2524 		sc->sc_nvm_wordsize = 2048;
   2525 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2526 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2527 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2528 			aprint_error_dev(sc->sc_dev,
   2529 			    "can't map FLASH registers\n");
   2530 			goto out;
   2531 		}
   2532 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2533 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2534 		    ICH_FLASH_SECTOR_SIZE;
   2535 		sc->sc_ich8_flash_bank_size =
   2536 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2537 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2538 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2539 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
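		/*
		 * GFPREG decode, illustratively: the low field (masked with
		 * ICH_GFPREG_BASE_MASK) is the first flash sector of the
		 * NVM region and the field at bit 16 the last one, both in
		 * sector units; e.g. base 1 and limit 2 span (2 + 1) - 1 =
		 * 2 sectors.  The final division by 2 * sizeof(uint16_t)
		 * converts bytes to 16-bit words and splits the region
		 * into its two banks.
		 */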
   2540 		sc->sc_flashreg_offset = 0;
   2541 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2542 		sc->phy.release = wm_put_swflag_ich8lan;
   2543 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2544 		sc->nvm.release = wm_put_nvm_ich8lan;
   2545 		break;
   2546 	case WM_T_PCH_SPT:
   2547 	case WM_T_PCH_CNP:
   2548 		sc->nvm.read = wm_nvm_read_spt;
   2549 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2550 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2551 		sc->sc_flasht = sc->sc_st;
   2552 		sc->sc_flashh = sc->sc_sh;
   2553 		sc->sc_ich8_flash_base = 0;
   2554 		sc->sc_nvm_wordsize =
   2555 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2556 		    * NVM_SIZE_MULTIPLIER;
    2557 		/* It is the size in bytes; we want words */
   2558 		sc->sc_nvm_wordsize /= 2;
   2559 		/* Assume 2 banks */
   2560 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
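		/*
		 * Size derivation above, illustratively: a 5-bit strap
		 * field of n yields (n + 1) * NVM_SIZE_MULTIPLIER bytes of
		 * flash; halving once converts bytes to 16-bit words and
		 * halving again apportions the words between the two
		 * assumed banks.
		 */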
   2561 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2562 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2563 		sc->phy.release = wm_put_swflag_ich8lan;
   2564 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2565 		sc->nvm.release = wm_put_nvm_ich8lan;
   2566 		break;
   2567 	case WM_T_I210:
   2568 	case WM_T_I211:
    2569 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2570 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2571 		if (wm_nvm_flash_presence_i210(sc)) {
   2572 			sc->nvm.read = wm_nvm_read_eerd;
   2573 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2574 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2575 			wm_nvm_set_addrbits_size_eecd(sc);
   2576 		} else {
   2577 			sc->nvm.read = wm_nvm_read_invm;
   2578 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2579 			sc->sc_nvm_wordsize = INVM_SIZE;
   2580 		}
   2581 		sc->phy.acquire = wm_get_phy_82575;
   2582 		sc->phy.release = wm_put_phy_82575;
   2583 		sc->nvm.acquire = wm_get_nvm_80003;
   2584 		sc->nvm.release = wm_put_nvm_80003;
   2585 		break;
   2586 	default:
   2587 		break;
   2588 	}
   2589 
   2590 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2591 	switch (sc->sc_type) {
   2592 	case WM_T_82571:
   2593 	case WM_T_82572:
   2594 		reg = CSR_READ(sc, WMREG_SWSM2);
   2595 		if ((reg & SWSM2_LOCK) == 0) {
   2596 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2597 			force_clear_smbi = true;
   2598 		} else
   2599 			force_clear_smbi = false;
   2600 		break;
   2601 	case WM_T_82573:
   2602 	case WM_T_82574:
   2603 	case WM_T_82583:
   2604 		force_clear_smbi = true;
   2605 		break;
   2606 	default:
   2607 		force_clear_smbi = false;
   2608 		break;
   2609 	}
   2610 	if (force_clear_smbi) {
   2611 		reg = CSR_READ(sc, WMREG_SWSM);
   2612 		if ((reg & SWSM_SMBI) != 0)
   2613 			aprint_error_dev(sc->sc_dev,
   2614 			    "Please update the Bootagent\n");
   2615 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2616 	}
   2617 
    2618 	/*
    2619 	 * Defer printing the EEPROM type until after verifying the checksum.
    2620 	 * This allows the EEPROM type to be printed correctly in the case
    2621 	 * that no EEPROM is attached.
    2622 	 */
   2623 	/*
   2624 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2625 	 * this for later, so we can fail future reads from the EEPROM.
   2626 	 */
   2627 	if (wm_nvm_validate_checksum(sc)) {
   2628 		/*
   2629 		 * Read twice again because some PCI-e parts fail the
   2630 		 * first check due to the link being in sleep state.
   2631 		 */
   2632 		if (wm_nvm_validate_checksum(sc))
   2633 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2634 	}
   2635 
   2636 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2637 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2638 	else {
   2639 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2640 		    sc->sc_nvm_wordsize);
   2641 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2642 			aprint_verbose("iNVM");
   2643 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2644 			aprint_verbose("FLASH(HW)");
   2645 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2646 			aprint_verbose("FLASH");
   2647 		else {
   2648 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2649 				eetype = "SPI";
   2650 			else
   2651 				eetype = "MicroWire";
   2652 			aprint_verbose("(%d address bits) %s EEPROM",
   2653 			    sc->sc_nvm_addrbits, eetype);
   2654 		}
   2655 	}
   2656 	wm_nvm_version(sc);
   2657 	aprint_verbose("\n");
   2658 
   2659 	/*
    2660 	 * XXX This is the first call of wm_gmii_setup_phytype(); the result
    2661 	 * might be incorrect.
   2662 	 */
   2663 	wm_gmii_setup_phytype(sc, 0, 0);
   2664 
   2665 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2666 	switch (sc->sc_type) {
   2667 	case WM_T_ICH8:
   2668 	case WM_T_ICH9:
   2669 	case WM_T_ICH10:
   2670 	case WM_T_PCH:
   2671 	case WM_T_PCH2:
   2672 	case WM_T_PCH_LPT:
   2673 	case WM_T_PCH_SPT:
   2674 	case WM_T_PCH_CNP:
   2675 		apme_mask = WUC_APME;
   2676 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2677 		if ((eeprom_data & apme_mask) != 0)
   2678 			sc->sc_flags |= WM_F_WOL;
   2679 		break;
   2680 	default:
   2681 		break;
   2682 	}
   2683 
   2684 	/* Reset the chip to a known state. */
   2685 	wm_reset(sc);
   2686 
   2687 	/*
   2688 	 * Check for I21[01] PLL workaround.
   2689 	 *
   2690 	 * Three cases:
   2691 	 * a) Chip is I211.
   2692 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2693 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2694 	 */
   2695 	if (sc->sc_type == WM_T_I211)
   2696 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2697 	if (sc->sc_type == WM_T_I210) {
   2698 		if (!wm_nvm_flash_presence_i210(sc))
   2699 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2700 		else if ((sc->sc_nvm_ver_major < 3)
   2701 		    || ((sc->sc_nvm_ver_major == 3)
   2702 			&& (sc->sc_nvm_ver_minor < 25))) {
   2703 			aprint_verbose_dev(sc->sc_dev,
   2704 			    "ROM image version %d.%d is older than 3.25\n",
   2705 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2706 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2707 		}
   2708 	}
   2709 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2710 		wm_pll_workaround_i210(sc);
   2711 
   2712 	wm_get_wakeup(sc);
   2713 
   2714 	/* Non-AMT based hardware can now take control from firmware */
   2715 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2716 		wm_get_hw_control(sc);
   2717 
   2718 	/*
    2719 	 * Read the Ethernet address from the EEPROM if it was not already
    2720 	 * found in the device properties.
   2721 	 */
   2722 	ea = prop_dictionary_get(dict, "mac-address");
   2723 	if (ea != NULL) {
   2724 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2725 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2726 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2727 	} else {
   2728 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2729 			aprint_error_dev(sc->sc_dev,
   2730 			    "unable to read Ethernet address\n");
   2731 			goto out;
   2732 		}
   2733 	}
   2734 
   2735 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2736 	    ether_sprintf(enaddr));
   2737 
   2738 	/*
   2739 	 * Read the config info from the EEPROM, and set up various
    2740 	 * bits in the control registers based on their contents. Values
         	 * supplied via device properties override the EEPROM.
    2741 	 */
   2742 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2743 	if (pn != NULL) {
   2744 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2745 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2746 	} else {
   2747 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2748 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2749 			goto out;
   2750 		}
   2751 	}
   2752 
   2753 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2754 	if (pn != NULL) {
   2755 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2756 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2757 	} else {
   2758 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2759 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2760 			goto out;
   2761 		}
   2762 	}
   2763 
   2764 	/* check for WM_F_WOL */
   2765 	switch (sc->sc_type) {
   2766 	case WM_T_82542_2_0:
   2767 	case WM_T_82542_2_1:
   2768 	case WM_T_82543:
   2769 		/* dummy? */
   2770 		eeprom_data = 0;
   2771 		apme_mask = NVM_CFG3_APME;
   2772 		break;
   2773 	case WM_T_82544:
   2774 		apme_mask = NVM_CFG2_82544_APM_EN;
   2775 		eeprom_data = cfg2;
   2776 		break;
   2777 	case WM_T_82546:
   2778 	case WM_T_82546_3:
   2779 	case WM_T_82571:
   2780 	case WM_T_82572:
   2781 	case WM_T_82573:
   2782 	case WM_T_82574:
   2783 	case WM_T_82583:
   2784 	case WM_T_80003:
   2785 	case WM_T_82575:
   2786 	case WM_T_82576:
   2787 		apme_mask = NVM_CFG3_APME;
   2788 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2789 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2790 		break;
   2791 	case WM_T_82580:
   2792 	case WM_T_I350:
   2793 	case WM_T_I354:
   2794 	case WM_T_I210:
   2795 	case WM_T_I211:
   2796 		apme_mask = NVM_CFG3_APME;
   2797 		wm_nvm_read(sc,
   2798 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2799 		    1, &eeprom_data);
   2800 		break;
   2801 	case WM_T_ICH8:
   2802 	case WM_T_ICH9:
   2803 	case WM_T_ICH10:
   2804 	case WM_T_PCH:
   2805 	case WM_T_PCH2:
   2806 	case WM_T_PCH_LPT:
   2807 	case WM_T_PCH_SPT:
   2808 	case WM_T_PCH_CNP:
    2809 		/* Already checked before wm_reset() */
   2810 		apme_mask = eeprom_data = 0;
   2811 		break;
   2812 	default: /* XXX 82540 */
   2813 		apme_mask = NVM_CFG3_APME;
   2814 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2815 		break;
   2816 	}
    2817 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2818 	if ((eeprom_data & apme_mask) != 0)
   2819 		sc->sc_flags |= WM_F_WOL;
   2820 
   2821 	/*
    2822 	 * We have the EEPROM settings; now apply the special cases
    2823 	 * where the EEPROM may be wrong or the board won't support
    2824 	 * wake-on-LAN on a particular port.
   2825 	 */
   2826 	switch (sc->sc_pcidevid) {
   2827 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2828 		sc->sc_flags &= ~WM_F_WOL;
   2829 		break;
   2830 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2831 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2832 		/* Wake events are only supported on port A of dual-fiber
    2833 		 * adapters, regardless of the EEPROM setting. */
   2834 		if (sc->sc_funcid == 1)
   2835 			sc->sc_flags &= ~WM_F_WOL;
   2836 		break;
   2837 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2838 		/* If quad port adapter, disable WoL on all but port A */
   2839 		if (sc->sc_funcid != 0)
   2840 			sc->sc_flags &= ~WM_F_WOL;
   2841 		break;
   2842 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2843 		/* Wake events are only supported on port A of dual-fiber
    2844 		 * adapters, regardless of the EEPROM setting. */
   2845 		if (sc->sc_funcid == 1)
   2846 			sc->sc_flags &= ~WM_F_WOL;
   2847 		break;
   2848 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2849 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2850 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2851 		/* If quad port adapter, disable WoL on all but port A */
   2852 		if (sc->sc_funcid != 0)
   2853 			sc->sc_flags &= ~WM_F_WOL;
   2854 		break;
   2855 	}
   2856 
   2857 	if (sc->sc_type >= WM_T_82575) {
   2858 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2859 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2860 			    nvmword);
   2861 			if ((sc->sc_type == WM_T_82575) ||
   2862 			    (sc->sc_type == WM_T_82576)) {
   2863 				/* Check NVM for autonegotiation */
   2864 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2865 				    != 0)
   2866 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2867 			}
   2868 			if ((sc->sc_type == WM_T_82575) ||
   2869 			    (sc->sc_type == WM_T_I350)) {
   2870 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2871 					sc->sc_flags |= WM_F_MAS;
   2872 			}
   2873 		}
   2874 	}
   2875 
   2876 	/*
    2877 	 * XXX Need special handling for some multiple-port cards
    2878 	 * to disable a particular port.
   2879 	 */
   2880 
   2881 	if (sc->sc_type >= WM_T_82544) {
   2882 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2883 		if (pn != NULL) {
   2884 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2885 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2886 		} else {
   2887 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2888 				aprint_error_dev(sc->sc_dev,
   2889 				    "unable to read SWDPIN\n");
   2890 				goto out;
   2891 			}
   2892 		}
   2893 	}
   2894 
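         	/* ILOS = Invert Loss-Of-Signal (per Intel's CTRL register docs). */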
   2895 	if (cfg1 & NVM_CFG1_ILOS)
   2896 		sc->sc_ctrl |= CTRL_ILOS;
   2897 
   2898 	/*
    2899 	 * XXX
    2900 	 * This code isn't correct because pins 2 and 3 are located
    2901 	 * at different positions on newer chips. Check all datasheets.
    2902 	 *
    2903 	 * Until this is resolved, only do it for chips up to the 82580.
   2904 	 */
   2905 	if (sc->sc_type <= WM_T_82580) {
   2906 		if (sc->sc_type >= WM_T_82544) {
   2907 			sc->sc_ctrl |=
   2908 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2909 			    CTRL_SWDPIO_SHIFT;
   2910 			sc->sc_ctrl |=
   2911 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2912 			    CTRL_SWDPINS_SHIFT;
   2913 		} else {
   2914 			sc->sc_ctrl |=
   2915 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2916 			    CTRL_SWDPIO_SHIFT;
   2917 		}
   2918 	}
   2919 
   2920 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2921 		wm_nvm_read(sc,
   2922 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2923 		    1, &nvmword);
   2924 		if (nvmword & NVM_CFG3_ILOS)
   2925 			sc->sc_ctrl |= CTRL_ILOS;
   2926 	}
   2927 
   2928 #if 0
   2929 	if (sc->sc_type >= WM_T_82544) {
   2930 		if (cfg1 & NVM_CFG1_IPS0)
   2931 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2932 		if (cfg1 & NVM_CFG1_IPS1)
   2933 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2934 		sc->sc_ctrl_ext |=
   2935 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2936 		    CTRL_EXT_SWDPIO_SHIFT;
   2937 		sc->sc_ctrl_ext |=
   2938 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2939 		    CTRL_EXT_SWDPINS_SHIFT;
   2940 	} else {
   2941 		sc->sc_ctrl_ext |=
   2942 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2943 		    CTRL_EXT_SWDPIO_SHIFT;
   2944 	}
   2945 #endif
   2946 
   2947 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2948 #if 0
   2949 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2950 #endif
   2951 
   2952 	if (sc->sc_type == WM_T_PCH) {
   2953 		uint16_t val;
   2954 
   2955 		/* Save the NVM K1 bit setting */
   2956 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2957 
   2958 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2959 			sc->sc_nvm_k1_enabled = 1;
   2960 		else
   2961 			sc->sc_nvm_k1_enabled = 0;
   2962 	}
   2963 
   2964 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2965 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2966 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2967 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2968 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2969 	    || sc->sc_type == WM_T_82573
   2970 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2971 		/* Copper only */
   2972 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2973 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2974 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2975 	    || (sc->sc_type == WM_T_I211)) {
   2976 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2977 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2978 		switch (link_mode) {
   2979 		case CTRL_EXT_LINK_MODE_1000KX:
   2980 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2981 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2982 			break;
   2983 		case CTRL_EXT_LINK_MODE_SGMII:
   2984 			if (wm_sgmii_uses_mdio(sc)) {
   2985 				aprint_normal_dev(sc->sc_dev,
   2986 				    "SGMII(MDIO)\n");
   2987 				sc->sc_flags |= WM_F_SGMII;
   2988 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2989 				break;
   2990 			}
   2991 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2992 			/*FALLTHROUGH*/
   2993 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2994 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2995 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2996 				if (link_mode
   2997 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2998 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2999 					sc->sc_flags |= WM_F_SGMII;
   3000 					aprint_verbose_dev(sc->sc_dev,
   3001 					    "SGMII\n");
   3002 				} else {
   3003 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   3004 					aprint_verbose_dev(sc->sc_dev,
   3005 					    "SERDES\n");
   3006 				}
   3007 				break;
   3008 			}
   3009 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   3010 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   3011 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3012 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   3013 				sc->sc_flags |= WM_F_SGMII;
   3014 			}
   3015 			/* Do not change link mode for 100BaseFX */
   3016 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   3017 				break;
   3018 
   3019 			/* Change current link mode setting */
   3020 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   3021 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3022 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   3023 			else
   3024 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   3025 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3026 			break;
   3027 		case CTRL_EXT_LINK_MODE_GMII:
   3028 		default:
   3029 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   3030 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3031 			break;
   3032 		}
   3033 
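         		/*
         		 * The I2C interface is only needed in SGMII mode (e.g.
         		 * to reach an SFP module), so enable it only then.
         		 */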
    3034 		reg &= ~CTRL_EXT_I2C_ENA;
    3035 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3036 			reg |= CTRL_EXT_I2C_ENA;
   3039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3040 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3041 			if (!wm_sgmii_uses_mdio(sc))
   3042 				wm_gmii_setup_phytype(sc, 0, 0);
   3043 			wm_reset_mdicnfg_82580(sc);
   3044 		}
   3045 	} else if (sc->sc_type < WM_T_82543 ||
   3046 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3047 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3048 			aprint_error_dev(sc->sc_dev,
   3049 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3050 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3051 		}
   3052 	} else {
   3053 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3054 			aprint_error_dev(sc->sc_dev,
   3055 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3056 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3057 		}
   3058 	}
   3059 
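         	/* Decide whether the chip supports Energy Efficient Ethernet (EEE). */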
   3060 	if (sc->sc_type >= WM_T_PCH2)
   3061 		sc->sc_flags |= WM_F_EEE;
   3062 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3063 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3064 		/* XXX: Need special handling for I354. (not yet) */
   3065 		if (sc->sc_type != WM_T_I354)
   3066 			sc->sc_flags |= WM_F_EEE;
   3067 	}
   3068 
   3069 	/*
    3070 	 * The I350 has a bug where it always strips the CRC whether asked
    3071 	 * to or not, so ask for a stripped CRC here and cope in wm_rxeof().
    3072 	 */
   3073 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3074 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3075 		sc->sc_flags |= WM_F_CRC_STRIP;
   3076 
   3077 	/* Set device properties (macflags) */
   3078 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3079 
   3080 	if (sc->sc_flags != 0) {
   3081 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3082 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3083 	}
   3084 
   3085 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3086 
   3087 	/* Initialize the media structures accordingly. */
   3088 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3089 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3090 	else
   3091 		wm_tbi_mediainit(sc); /* All others */
   3092 
   3093 	ifp = &sc->sc_ethercom.ec_if;
   3094 	xname = device_xname(sc->sc_dev);
   3095 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3096 	ifp->if_softc = sc;
   3097 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3098 	ifp->if_extflags = IFEF_MPSAFE;
   3099 	ifp->if_ioctl = wm_ioctl;
   3100 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3101 		ifp->if_start = wm_nq_start;
    3102 		/*
    3103 		 * When the number of CPUs is one and the controller can use
    3104 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3105 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3106 		 * other for link status changes.
    3107 		 * In this situation, wm_nq_transmit() is disadvantageous
    3108 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    3109 		 */
   3110 		if (wm_is_using_multiqueue(sc))
   3111 			ifp->if_transmit = wm_nq_transmit;
   3112 	} else {
   3113 		ifp->if_start = wm_start;
   3114 		/*
   3115 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3116 		 * described above.
   3117 		 */
   3118 		if (wm_is_using_multiqueue(sc))
   3119 			ifp->if_transmit = wm_transmit;
   3120 	}
    3121 	/* wm(4) does not use ifp->if_watchdog; wm_tick() acts as the watchdog. */
   3122 	ifp->if_init = wm_init;
   3123 	ifp->if_stop = wm_stop;
   3124 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3125 	IFQ_SET_READY(&ifp->if_snd);
   3126 
   3127 	/* Check for jumbo frame */
   3128 	switch (sc->sc_type) {
   3129 	case WM_T_82573:
   3130 		/* XXX limited to 9234 if ASPM is disabled */
   3131 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3132 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3133 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3134 		break;
   3135 	case WM_T_82571:
   3136 	case WM_T_82572:
   3137 	case WM_T_82574:
   3138 	case WM_T_82583:
   3139 	case WM_T_82575:
   3140 	case WM_T_82576:
   3141 	case WM_T_82580:
   3142 	case WM_T_I350:
   3143 	case WM_T_I354:
   3144 	case WM_T_I210:
   3145 	case WM_T_I211:
   3146 	case WM_T_80003:
   3147 	case WM_T_ICH9:
   3148 	case WM_T_ICH10:
   3149 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3150 	case WM_T_PCH_LPT:
   3151 	case WM_T_PCH_SPT:
   3152 	case WM_T_PCH_CNP:
   3153 		/* XXX limited to 9234 */
   3154 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3155 		break;
   3156 	case WM_T_PCH:
   3157 		/* XXX limited to 4096 */
   3158 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3159 		break;
   3160 	case WM_T_82542_2_0:
   3161 	case WM_T_82542_2_1:
   3162 	case WM_T_ICH8:
   3163 		/* No support for jumbo frame */
   3164 		break;
   3165 	default:
   3166 		/* ETHER_MAX_LEN_JUMBO */
   3167 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3168 		break;
   3169 	}
   3170 
    3171 	/* If we're an i82543 or greater, we can support VLANs. */
   3172 	if (sc->sc_type >= WM_T_82543) {
   3173 		sc->sc_ethercom.ec_capabilities |=
   3174 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3175 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3176 	}
   3177 
   3178 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3179 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3180 
   3181 	/*
    3182 	 * We can offload IPv4/TCPv4/UDPv4 checksums (Tx and Rx) and
    3183 	 * TCPv6/UDPv6 Tx checksums, but only on i82543 and later.
   3184 	 */
   3185 	if (sc->sc_type >= WM_T_82543) {
   3186 		ifp->if_capabilities |=
   3187 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3188 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3189 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3190 		    IFCAP_CSUM_TCPv6_Tx |
   3191 		    IFCAP_CSUM_UDPv6_Tx;
   3192 	}
   3193 
   3194 	/*
   3195 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3196 	 *
   3197 	 *	82541GI (8086:1076) ... no
   3198 	 *	82572EI (8086:10b9) ... yes
   3199 	 */
   3200 	if (sc->sc_type >= WM_T_82571) {
   3201 		ifp->if_capabilities |=
   3202 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3203 	}
   3204 
   3205 	/*
    3206 	 * If we're an i82544 or greater (except i82547), we can do
   3207 	 * TCP segmentation offload.
   3208 	 */
   3209 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3210 		ifp->if_capabilities |= IFCAP_TSOv4;
   3211 
   3212 	if (sc->sc_type >= WM_T_82571)
   3213 		ifp->if_capabilities |= IFCAP_TSOv6;
   3214 
   3215 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3216 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3217 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3218 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3219 
   3220 	/* Attach the interface. */
   3221 	if_initialize(ifp);
   3222 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3223 	ether_ifattach(ifp, enaddr);
   3224 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3225 	if_register(ifp);
   3226 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3227 	    RND_FLAG_DEFAULT);
   3228 
   3229 #ifdef WM_EVENT_COUNTERS
   3230 	/* Attach event counters. */
   3231 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3232 	    NULL, xname, "linkintr");
   3233 
   3234 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3235 	    NULL, xname, "CRC Error");
   3236 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Symbol Error");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Missed Packets");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Collision");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Sequence Error");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Receive Length Error");
   3246 
   3247 	if (sc->sc_type >= WM_T_82543) {
   3248 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3249 		    NULL, xname, "Alignment Error");
   3250 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3251 		    NULL, xname, "Receive Error");
   3252 		/* XXX Does 82575 have HTDPMC? */
   3253 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3254 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
   3255 			    EVCNT_TYPE_MISC, NULL, xname,
   3256 			    "Carrier Extension Error");
   3257 		else
   3258 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
   3259 			    EVCNT_TYPE_MISC, NULL, xname,
   3260 			    "Host Transmit Discarded Packets by MAC");
   3261 
   3262 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3263 		    NULL, xname, "Tx with No CRS");
   3264 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3265 		    NULL, xname, "TCP Segmentation Context Tx");
   3266 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3267 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
   3268 			    EVCNT_TYPE_MISC, NULL, xname,
   3269 			    "TCP Segmentation Context Tx Fail");
   3270 		else {
   3271 			/* XXX Is the circuit breaker only for 82576? */
   3272 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
   3273 			    EVCNT_TYPE_MISC, NULL, xname,
   3274 			    "Circuit Breaker Rx Dropped Packet");
   3275 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
   3276 			    EVCNT_TYPE_MISC, NULL, xname,
   3277 			    "Circuit Breaker Rx Manageability Packet");
   3278 		}
   3279 	}
   3280 
   3281 	if (sc->sc_type >= WM_T_82542_2_1) {
   3282 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3283 		    NULL, xname, "tx_xoff");
   3284 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3285 		    NULL, xname, "tx_xon");
   3286 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3287 		    NULL, xname, "rx_xoff");
   3288 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3289 		    NULL, xname, "rx_xon");
   3290 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3291 		    NULL, xname, "rx_macctl");
   3292 	}
   3293 
   3294 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3295 	    NULL, xname, "Single Collision");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Excessive Collisions");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Multiple Collision");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Late Collisions");
   3302 
   3303 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3304 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
   3305 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
   3306 
   3307 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3308 	    NULL, xname, "Defer");
   3309 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3310 	    NULL, xname, "Packets Rx (64 bytes)");
   3311 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3312 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3313 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3314 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3315 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3316 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3317 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3318 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3319 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3320 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3321 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3322 	    NULL, xname, "Good Packets Rx");
   3323 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3324 	    NULL, xname, "Broadcast Packets Rx");
   3325 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3326 	    NULL, xname, "Multicast Packets Rx");
   3327 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3328 	    NULL, xname, "Good Packets Tx");
   3329 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3330 	    NULL, xname, "Good Octets Rx");
   3331 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3332 	    NULL, xname, "Good Octets Tx");
   3333 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3334 	    NULL, xname, "Rx No Buffers");
   3335 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3336 	    NULL, xname, "Rx Undersize");
   3337 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3338 	    NULL, xname, "Rx Fragment");
   3339 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3340 	    NULL, xname, "Rx Oversize");
   3341 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3342 	    NULL, xname, "Rx Jabber");
   3343 	if (sc->sc_type >= WM_T_82540) {
   3344 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3345 		    NULL, xname, "Management Packets RX");
   3346 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3347 		    NULL, xname, "Management Packets Dropped");
   3348 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3349 		    NULL, xname, "Management Packets TX");
   3350 	}
   3351 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3352 	    NULL, xname, "Total Octets Rx");
   3353 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3354 	    NULL, xname, "Total Octets Tx");
   3355 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3356 	    NULL, xname, "Total Packets Rx");
   3357 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3358 	    NULL, xname, "Total Packets Tx");
   3359 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3360 	    NULL, xname, "Packets Tx (64 bytes)");
   3361 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3362 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3363 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3364 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3365 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3366 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3367 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3368 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3369 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3370 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3371 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3372 	    NULL, xname, "Multicast Packets Tx");
   3373 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3374 	    NULL, xname, "Broadcast Packets Tx");
   3375 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3376 	    NULL, xname, "Interrupt Assertion");
   3377 	if (sc->sc_type < WM_T_82575) {
   3378 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3379 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3380 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3381 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3382 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3383 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3384 		evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3385 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3386 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3387 		    NULL, xname, "Intr. Cause Tx Queue Empty");
   3388 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3389 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3390 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3391 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3392 
   3393 		/* XXX 82575 document says it has ICRXOC. Is that right? */
   3394 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3395 		    NULL, xname, "Interrupt Cause Receiver Overrun");
   3396 	} else if (!WM_IS_ICHPCH(sc)) {
   3397 		/*
   3398 		 * For 82575 and newer.
   3399 		 *
   3400 		 * On 80003, ICHs and PCHs, it seems all of the following
   3401 		 * registers are zero.
   3402 		 */
   3403 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
   3404 		    NULL, xname, "Rx Packets To Host");
   3405 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
   3406 		    NULL, xname, "Debug Counter 1");
   3407 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
   3408 		    NULL, xname, "Debug Counter 2");
   3409 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
   3410 		    NULL, xname, "Debug Counter 3");
   3411 
   3412 		/*
    3413 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue
    3414 		 * Empty). I think that's wrong; the real count I observed
    3415 		 * is the same as GPTC (Good Packets Tx) and TPT (Total
    3416 		 * Packets Tx). It's HGPTC (Host Good Packets Tx), which is
    3417 		 * described in the 82576's datasheet.
   3418 		 */
   3419 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
   3420 		    NULL, xname, "Host Good Packets TX");
   3421 
   3422 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
   3423 		    NULL, xname, "Debug Counter 4");
   3424 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
   3425 		    NULL, xname, "Rx Desc Min Thresh");
   3426 		/* XXX Is the circuit breaker only for 82576? */
   3427 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
   3428 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
   3429 
   3430 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
   3431 		    NULL, xname, "Host Good Octets Rx");
   3432 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
   3433 		    NULL, xname, "Host Good Octets Tx");
   3434 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
   3435 		    NULL, xname, "Length Errors");
   3436 	}
   3437 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3438 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
   3439 		    NULL, xname, "EEE Tx LPI");
   3440 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
   3441 		    NULL, xname, "EEE Rx LPI");
   3442 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3443 		    NULL, xname, "BMC2OS Packets received by host");
   3444 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3445 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3446 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3447 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3448 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3449 		    NULL, xname, "OS2BMC Packets received by BMC");
   3450 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
   3451 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
   3452 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
   3453 		    NULL, xname, "Header Redirection Missed Packet");
   3454 	}
   3455 #endif /* WM_EVENT_COUNTERS */
   3456 
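         	/*
         	 * Default to softint-based Tx/Rx processing; this may be
         	 * switched to a workqueue at runtime (see wm_init_sysctls()).
         	 */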
   3457 	sc->sc_txrx_use_workqueue = false;
   3458 
   3459 	if (wm_phy_need_linkdown_discard(sc)) {
   3460 		DPRINTF(sc, WM_DEBUG_LINK,
   3461 		    ("%s: %s: Set linkdown discard flag\n",
   3462 			device_xname(sc->sc_dev), __func__));
   3463 		wm_set_linkdown_discard(sc);
   3464 	}
   3465 
   3466 	wm_init_sysctls(sc);
   3467 
   3468 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3469 		pmf_class_network_register(self, ifp);
   3470 	else
   3471 		aprint_error_dev(self, "couldn't establish power handler\n");
   3472 
   3473 	sc->sc_flags |= WM_F_ATTACHED;
   3474 out:
   3475 	return;
   3476 }
   3477 
   3478 /* The detach function (ca_detach) */
   3479 static int
   3480 wm_detach(device_t self, int flags __unused)
   3481 {
   3482 	struct wm_softc *sc = device_private(self);
   3483 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3484 	int i;
   3485 
   3486 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3487 		return 0;
   3488 
    3489 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3490 	IFNET_LOCK(ifp);
   3491 	sc->sc_dying = true;
   3492 	wm_stop(ifp, 1);
   3493 	IFNET_UNLOCK(ifp);
   3494 
   3495 	pmf_device_deregister(self);
   3496 
   3497 	sysctl_teardown(&sc->sc_sysctllog);
   3498 
   3499 #ifdef WM_EVENT_COUNTERS
   3500 	evcnt_detach(&sc->sc_ev_linkintr);
   3501 
   3502 	evcnt_detach(&sc->sc_ev_crcerrs);
   3503 	evcnt_detach(&sc->sc_ev_symerrc);
   3504 	evcnt_detach(&sc->sc_ev_mpc);
   3505 	evcnt_detach(&sc->sc_ev_colc);
   3506 	evcnt_detach(&sc->sc_ev_sec);
   3507 	evcnt_detach(&sc->sc_ev_rlec);
   3508 
   3509 	if (sc->sc_type >= WM_T_82543) {
   3510 		evcnt_detach(&sc->sc_ev_algnerrc);
   3511 		evcnt_detach(&sc->sc_ev_rxerrc);
   3512 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3513 			evcnt_detach(&sc->sc_ev_cexterr);
   3514 		else
   3515 			evcnt_detach(&sc->sc_ev_htdpmc);
   3516 
   3517 		evcnt_detach(&sc->sc_ev_tncrs);
   3518 		evcnt_detach(&sc->sc_ev_tsctc);
   3519 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3520 			evcnt_detach(&sc->sc_ev_tsctfc);
   3521 		else {
   3522 			evcnt_detach(&sc->sc_ev_cbrdpc);
   3523 			evcnt_detach(&sc->sc_ev_cbrmpc);
   3524 		}
   3525 	}
   3526 
   3527 	if (sc->sc_type >= WM_T_82542_2_1) {
   3528 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3529 		evcnt_detach(&sc->sc_ev_tx_xon);
   3530 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3531 		evcnt_detach(&sc->sc_ev_rx_xon);
   3532 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3533 	}
   3534 
   3535 	evcnt_detach(&sc->sc_ev_scc);
   3536 	evcnt_detach(&sc->sc_ev_ecol);
   3537 	evcnt_detach(&sc->sc_ev_mcc);
   3538 	evcnt_detach(&sc->sc_ev_latecol);
   3539 
   3540 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
   3541 		evcnt_detach(&sc->sc_ev_cbtmpc);
   3542 
   3543 	evcnt_detach(&sc->sc_ev_dc);
   3544 	evcnt_detach(&sc->sc_ev_prc64);
   3545 	evcnt_detach(&sc->sc_ev_prc127);
   3546 	evcnt_detach(&sc->sc_ev_prc255);
   3547 	evcnt_detach(&sc->sc_ev_prc511);
   3548 	evcnt_detach(&sc->sc_ev_prc1023);
   3549 	evcnt_detach(&sc->sc_ev_prc1522);
   3550 	evcnt_detach(&sc->sc_ev_gprc);
   3551 	evcnt_detach(&sc->sc_ev_bprc);
   3552 	evcnt_detach(&sc->sc_ev_mprc);
   3553 	evcnt_detach(&sc->sc_ev_gptc);
   3554 	evcnt_detach(&sc->sc_ev_gorc);
   3555 	evcnt_detach(&sc->sc_ev_gotc);
   3556 	evcnt_detach(&sc->sc_ev_rnbc);
   3557 	evcnt_detach(&sc->sc_ev_ruc);
   3558 	evcnt_detach(&sc->sc_ev_rfc);
   3559 	evcnt_detach(&sc->sc_ev_roc);
   3560 	evcnt_detach(&sc->sc_ev_rjc);
   3561 	if (sc->sc_type >= WM_T_82540) {
   3562 		evcnt_detach(&sc->sc_ev_mgtprc);
   3563 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3564 		evcnt_detach(&sc->sc_ev_mgtptc);
   3565 	}
   3566 	evcnt_detach(&sc->sc_ev_tor);
   3567 	evcnt_detach(&sc->sc_ev_tot);
   3568 	evcnt_detach(&sc->sc_ev_tpr);
   3569 	evcnt_detach(&sc->sc_ev_tpt);
   3570 	evcnt_detach(&sc->sc_ev_ptc64);
   3571 	evcnt_detach(&sc->sc_ev_ptc127);
   3572 	evcnt_detach(&sc->sc_ev_ptc255);
   3573 	evcnt_detach(&sc->sc_ev_ptc511);
   3574 	evcnt_detach(&sc->sc_ev_ptc1023);
   3575 	evcnt_detach(&sc->sc_ev_ptc1522);
   3576 	evcnt_detach(&sc->sc_ev_mptc);
   3577 	evcnt_detach(&sc->sc_ev_bptc);
   3578 	evcnt_detach(&sc->sc_ev_iac);
   3579 	if (sc->sc_type < WM_T_82575) {
   3580 		evcnt_detach(&sc->sc_ev_icrxptc);
   3581 		evcnt_detach(&sc->sc_ev_icrxatc);
   3582 		evcnt_detach(&sc->sc_ev_ictxptc);
   3583 		evcnt_detach(&sc->sc_ev_ictxact);
   3584 		evcnt_detach(&sc->sc_ev_ictxqec);
   3585 		evcnt_detach(&sc->sc_ev_ictxqmtc);
   3586 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3587 		evcnt_detach(&sc->sc_ev_icrxoc);
   3588 	} else if (!WM_IS_ICHPCH(sc)) {
   3589 		evcnt_detach(&sc->sc_ev_rpthc);
   3590 		evcnt_detach(&sc->sc_ev_debug1);
   3591 		evcnt_detach(&sc->sc_ev_debug2);
   3592 		evcnt_detach(&sc->sc_ev_debug3);
   3593 		evcnt_detach(&sc->sc_ev_hgptc);
   3594 		evcnt_detach(&sc->sc_ev_debug4);
   3595 		evcnt_detach(&sc->sc_ev_rxdmtc);
   3596 		evcnt_detach(&sc->sc_ev_htcbdpc);
   3597 
   3598 		evcnt_detach(&sc->sc_ev_hgorc);
   3599 		evcnt_detach(&sc->sc_ev_hgotc);
   3600 		evcnt_detach(&sc->sc_ev_lenerrs);
   3601 	}
   3602 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3603 		evcnt_detach(&sc->sc_ev_tlpic);
   3604 		evcnt_detach(&sc->sc_ev_rlpic);
   3605 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3606 		evcnt_detach(&sc->sc_ev_o2bspc);
   3607 		evcnt_detach(&sc->sc_ev_b2ospc);
   3608 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3609 		evcnt_detach(&sc->sc_ev_scvpc);
   3610 		evcnt_detach(&sc->sc_ev_hrmpc);
   3611 	}
   3612 #endif /* WM_EVENT_COUNTERS */
   3613 
   3614 	rnd_detach_source(&sc->rnd_source);
   3615 
   3616 	/* Tell the firmware about the release */
   3617 	mutex_enter(sc->sc_core_lock);
   3618 	wm_release_manageability(sc);
   3619 	wm_release_hw_control(sc);
   3620 	wm_enable_wakeup(sc);
   3621 	mutex_exit(sc->sc_core_lock);
   3622 
   3623 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3624 
   3625 	ether_ifdetach(ifp);
   3626 	if_detach(ifp);
   3627 	if_percpuq_destroy(sc->sc_ipq);
   3628 
   3629 	/* Delete all remaining media. */
   3630 	ifmedia_fini(&sc->sc_mii.mii_media);
   3631 
   3632 	/* Unload RX dmamaps and free mbufs */
   3633 	for (i = 0; i < sc->sc_nqueues; i++) {
   3634 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3635 		mutex_enter(rxq->rxq_lock);
   3636 		wm_rxdrain(rxq);
   3637 		mutex_exit(rxq->rxq_lock);
   3638 	}
    3639 	/* The rxq locks must not be held beyond this point. */
   3640 
   3641 	/* Disestablish the interrupt handler */
   3642 	for (i = 0; i < sc->sc_nintrs; i++) {
   3643 		if (sc->sc_ihs[i] != NULL) {
   3644 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3645 			sc->sc_ihs[i] = NULL;
   3646 		}
   3647 	}
   3648 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3649 
   3650 	/* wm_stop() ensured that the workqueues are stopped. */
   3651 	workqueue_destroy(sc->sc_queue_wq);
   3652 	workqueue_destroy(sc->sc_reset_wq);
   3653 
   3654 	for (i = 0; i < sc->sc_nqueues; i++)
   3655 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3656 
   3657 	wm_free_txrx_queues(sc);
   3658 
   3659 	/* Unmap the registers */
   3660 	if (sc->sc_ss) {
   3661 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3662 		sc->sc_ss = 0;
   3663 	}
   3664 	if (sc->sc_ios) {
   3665 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3666 		sc->sc_ios = 0;
   3667 	}
   3668 	if (sc->sc_flashs) {
   3669 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3670 		sc->sc_flashs = 0;
   3671 	}
   3672 
   3673 	if (sc->sc_core_lock)
   3674 		mutex_obj_free(sc->sc_core_lock);
   3675 	if (sc->sc_ich_phymtx)
   3676 		mutex_obj_free(sc->sc_ich_phymtx);
   3677 	if (sc->sc_ich_nvmmtx)
   3678 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3679 
   3680 	return 0;
   3681 }
   3682 
   3683 static bool
   3684 wm_suspend(device_t self, const pmf_qual_t *qual)
   3685 {
   3686 	struct wm_softc *sc = device_private(self);
   3687 
   3688 	wm_release_manageability(sc);
   3689 	wm_release_hw_control(sc);
   3690 	wm_enable_wakeup(sc);
   3691 
   3692 	return true;
   3693 }
   3694 
   3695 static bool
   3696 wm_resume(device_t self, const pmf_qual_t *qual)
   3697 {
   3698 	struct wm_softc *sc = device_private(self);
   3699 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3700 	pcireg_t reg;
   3701 	char buf[256];
   3702 
   3703 	reg = CSR_READ(sc, WMREG_WUS);
   3704 	if (reg != 0) {
   3705 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3706 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
    3707 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C: write 1 to clear */
   3708 	}
   3709 
   3710 	if (sc->sc_type >= WM_T_PCH2)
   3711 		wm_resume_workarounds_pchlan(sc);
   3712 	IFNET_LOCK(ifp);
   3713 	if ((ifp->if_flags & IFF_UP) == 0) {
   3714 		/* >= PCH_SPT hardware workaround before reset. */
   3715 		if (sc->sc_type >= WM_T_PCH_SPT)
   3716 			wm_flush_desc_rings(sc);
   3717 
   3718 		wm_reset(sc);
   3719 		/* Non-AMT based hardware can now take control from firmware */
   3720 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3721 			wm_get_hw_control(sc);
   3722 		wm_init_manageability(sc);
   3723 	} else {
   3724 		/*
   3725 		 * We called pmf_class_network_register(), so if_init() is
    3726 		 * automatically called when IFF_UP is set. wm_reset(),
   3727 		 * wm_get_hw_control() and wm_init_manageability() are called
   3728 		 * via wm_init().
   3729 		 */
   3730 	}
   3731 	IFNET_UNLOCK(ifp);
   3732 
   3733 	return true;
   3734 }
   3735 
   3736 /*
   3737  * wm_watchdog:
   3738  *
   3739  *	Watchdog checker.
   3740  */
   3741 static bool
   3742 wm_watchdog(struct ifnet *ifp)
   3743 {
   3744 	int qid;
   3745 	struct wm_softc *sc = ifp->if_softc;
    3746 	uint16_t hang_queue = 0; /* One bit per queue; max is 82576's 16. */
   3747 
   3748 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3749 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3750 
   3751 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3752 	}
   3753 
   3754 #ifdef WM_DEBUG
   3755 	if (sc->sc_trigger_reset) {
    3756 		/* Debug operation; no need for atomicity or reliability. */
   3757 		sc->sc_trigger_reset = 0;
   3758 		hang_queue++;
   3759 	}
   3760 #endif
   3761 
   3762 	if (hang_queue == 0)
   3763 		return true;
   3764 
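         	/*
         	 * Enqueue the reset work only on the 0 -> 1 transition of
         	 * sc_reset_pending so that at most one reset is outstanding.
         	 */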
   3765 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
   3766 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
   3767 
   3768 	return false;
   3769 }
   3770 
   3771 /*
   3772  * Perform an interface watchdog reset.
   3773  */
   3774 static void
   3775 wm_handle_reset_work(struct work *work, void *arg)
   3776 {
   3777 	struct wm_softc * const sc = arg;
   3778 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
   3779 
   3780 	/* Don't want ioctl operations to happen */
   3781 	IFNET_LOCK(ifp);
   3782 
    3783 	/* Reset the interface. */
   3784 	wm_init(ifp);
   3785 
   3786 	IFNET_UNLOCK(ifp);
   3787 
   3788 	/*
    3789 	 * Some upper-layer processing still calls ifp->if_start()
    3790 	 * directly, e.g. ALTQ or single-CPU systems.
   3791 	 */
   3792 	/* Try to get more packets going. */
   3793 	ifp->if_start(ifp);
   3794 
   3795 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
   3796 }
   3797 
   3798 
   3799 static void
   3800 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3801 {
   3802 
   3803 	mutex_enter(txq->txq_lock);
   3804 	if (txq->txq_sending &&
   3805 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3806 		wm_watchdog_txq_locked(ifp, txq, hang);
   3807 
   3808 	mutex_exit(txq->txq_lock);
   3809 }
   3810 
   3811 static void
   3812 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3813     uint16_t *hang)
   3814 {
   3815 	struct wm_softc *sc = ifp->if_softc;
   3816 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3817 
   3818 	KASSERT(mutex_owned(txq->txq_lock));
   3819 
   3820 	/*
   3821 	 * Since we're using delayed interrupts, sweep up
   3822 	 * before we report an error.
   3823 	 */
   3824 	wm_txeof(txq, UINT_MAX);
   3825 
   3826 	if (txq->txq_sending)
   3827 		*hang |= __BIT(wmq->wmq_id);
   3828 
   3829 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3830 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3831 		    device_xname(sc->sc_dev));
   3832 	} else {
   3833 #ifdef WM_DEBUG
   3834 		int i, j;
   3835 		struct wm_txsoft *txs;
   3836 #endif
   3837 		log(LOG_ERR,
   3838 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3839 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3840 		    txq->txq_next);
   3841 		if_statinc(ifp, if_oerrors);
   3842 #ifdef WM_DEBUG
   3843 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3844 		     i = WM_NEXTTXS(txq, i)) {
   3845 			txs = &txq->txq_soft[i];
   3846 			printf("txs %d tx %d -> %d\n",
   3847 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3848 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3849 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3850 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3851 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3852 					printf("\t %#08x%08x\n",
   3853 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3854 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3855 				} else {
   3856 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3857 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3858 					    txq->txq_descs[j].wtx_addr.wa_low);
   3859 					printf("\t %#04x%02x%02x%08x\n",
   3860 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3861 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3862 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3863 					    txq->txq_descs[j].wtx_cmdlen);
   3864 				}
   3865 				if (j == txs->txs_lastdesc)
   3866 					break;
   3867 			}
   3868 		}
   3869 #endif
   3870 	}
   3871 }
   3872 
   3873 /*
   3874  * wm_tick:
   3875  *
   3876  *	One second timer, used to check link status, sweep up
   3877  *	completed transmit jobs, etc.
   3878  */
   3879 static void
   3880 wm_tick(void *arg)
   3881 {
   3882 	struct wm_softc *sc = arg;
   3883 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3884 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3885 	    cexterr;
   3886 
   3887 	mutex_enter(sc->sc_core_lock);
   3888 
   3889 	if (sc->sc_core_stopping) {
   3890 		mutex_exit(sc->sc_core_lock);
   3891 		return;
   3892 	}
   3893 
   3894 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3895 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3896 	mpc = CSR_READ(sc, WMREG_MPC);
   3897 	colc = CSR_READ(sc, WMREG_COLC);
   3898 	sec = CSR_READ(sc, WMREG_SEC);
   3899 	rlec = CSR_READ(sc, WMREG_RLEC);
   3900 
   3901 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3902 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3903 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3904 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3905 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3906 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3907 
   3908 	if (sc->sc_type >= WM_T_82543) {
   3909 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3910 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3911 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3912 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3913 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
   3914 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3915 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3916 		} else {
   3917 			cexterr = 0;
   3918 			/* Excessive collision + Link down */
   3919 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
   3920 			    CSR_READ(sc, WMREG_HTDPMC));
   3921 		}
   3922 
   3923 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3924 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3925 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
   3926 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
   3927 			    CSR_READ(sc, WMREG_TSCTFC));
   3928 		else {
   3929 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
   3930 			    CSR_READ(sc, WMREG_CBRMPC));
   3931 		}
   3932 	} else
   3933 		algnerrc = rxerrc = cexterr = 0;
   3934 
   3935 	if (sc->sc_type >= WM_T_82542_2_1) {
   3936 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3937 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3938 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3939 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3940 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3941 	}
   3942 
   3943 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3944 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3945 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3946 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3947 
   3948 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
   3949 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
   3950 	}
   3951 
   3952 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3953 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3954 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3955 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3956 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3957 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3958 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3959 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3960 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3961 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3962 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3963 
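         	/*
         	 * GORC/GOTC are 64-bit counters split across low/high register
         	 * pairs; read the low half first, then the high half (the read
         	 * order the Intel datasheets prescribe for split counters).
         	 */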
   3964 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
   3965 	    CSR_READ(sc, WMREG_GORCL) +
   3966 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
   3967 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
   3968 	    CSR_READ(sc, WMREG_GOTCL) +
   3969 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3970 
   3971 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3972 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3973 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3974 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3975 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3976 
   3977 	if (sc->sc_type >= WM_T_82540) {
   3978 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3979 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3980 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3981 	}
   3982 
   3983 	/*
   3984 	 * The TOR(L) register includes:
   3985 	 *  - Error
   3986 	 *  - Flow control
    3987 	 *  - Broadcast rejected (this note appears in 82574 and newer
    3988 	 *    datasheets; what "broadcast rejected" means is unclear)
   3989 	 */
   3990 	WM_EVCNT_ADD(&sc->sc_ev_tor,
   3991 	    CSR_READ(sc, WMREG_TORL) +
   3992 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
   3993 	WM_EVCNT_ADD(&sc->sc_ev_tot,
   3994 	    CSR_READ(sc, WMREG_TOTL) +
   3995 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3996 
   3997 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3998 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3999 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   4000 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   4001 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   4002 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   4003 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   4004 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   4005 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   4006 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   4007 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   4008 	if (sc->sc_type < WM_T_82575) {
   4009 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   4010 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   4011 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   4012 		WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   4013 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   4014 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
   4015 		    CSR_READ(sc, WMREG_ICTXQMTC));
   4016 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
   4017 		    CSR_READ(sc, WMREG_ICRXDMTC));
   4018 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   4019 	} else if (!WM_IS_ICHPCH(sc)) {
   4020 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
   4021 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
   4022 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
   4023 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
   4024 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
   4025 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
   4026 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
   4027 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
   4028 
   4029 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
   4030 		    CSR_READ(sc, WMREG_HGORCL) +
   4031 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
   4032 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
   4033 		    CSR_READ(sc, WMREG_HGOTCL) +
   4034 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
   4035 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
   4036 	}
   4037 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   4038 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
   4039 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
   4040 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
   4041 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
   4042 			    CSR_READ(sc, WMREG_B2OGPRC));
   4043 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
   4044 			    CSR_READ(sc, WMREG_O2BSPC));
   4045 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
   4046 			    CSR_READ(sc, WMREG_B2OSPC));
   4047 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
   4048 			    CSR_READ(sc, WMREG_O2BGPTC));
   4049 		}
   4050 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
   4051 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
   4052 	}
   4053 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   4054 	if_statadd_ref(nsr, if_collisions, colc);
   4055 	if_statadd_ref(nsr, if_ierrors,
   4056 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   4057 	/*
    4058 	 * WMREG_RNBC is incremented when no receive buffer is available in
    4059 	 * host memory. It is not a count of dropped packets, because the
    4060 	 * controller can still receive a packet in that state as long as
    4061 	 * there is space in the PHY's FIFO.
    4062 	 *
    4063 	 * To count WMREG_RNBC events, use a dedicated EVCNT instead of
    4064 	 * if_iqdrops.
   4065 	 */
   4066 	if_statadd_ref(nsr, if_iqdrops, mpc);
   4067 	IF_STAT_PUTREF(ifp);
   4068 
   4069 	if (sc->sc_flags & WM_F_HAS_MII)
   4070 		mii_tick(&sc->sc_mii);
   4071 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   4072 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4073 		wm_serdes_tick(sc);
   4074 	else
   4075 		wm_tbi_tick(sc);
   4076 
   4077 	mutex_exit(sc->sc_core_lock);
   4078 
   4079 	if (wm_watchdog(ifp))
   4080 		callout_schedule(&sc->sc_tick_ch, hz);
   4081 }
   4082 
   4083 static int
   4084 wm_ifflags_cb(struct ethercom *ec)
   4085 {
   4086 	struct ifnet *ifp = &ec->ec_if;
   4087 	struct wm_softc *sc = ifp->if_softc;
   4088 	u_short iffchange;
   4089 	int ecchange;
   4090 	bool needreset = false;
   4091 	int rc = 0;
   4092 
   4093 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4094 		device_xname(sc->sc_dev), __func__));
   4095 
   4096 	KASSERT(IFNET_LOCKED(ifp));
   4097 
   4098 	mutex_enter(sc->sc_core_lock);
   4099 
    4100 	/*
    4101 	 * Check for changes in if_flags.
    4102 	 * The main purpose is to prevent a link down when bpf is opened.
    4103 	 */
   4104 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   4105 	sc->sc_if_flags = ifp->if_flags;
   4106 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   4107 		needreset = true;
   4108 		goto ec;
   4109 	}
   4110 
   4111 	/* iff related updates */
   4112 	if ((iffchange & IFF_PROMISC) != 0)
   4113 		wm_set_filter(sc);
   4114 
   4115 	wm_set_vlan(sc);
   4116 
   4117 ec:
   4118 	/* Check for ec_capenable. */
   4119 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   4120 	sc->sc_ec_capenable = ec->ec_capenable;
   4121 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   4122 		needreset = true;
   4123 		goto out;
   4124 	}
   4125 
   4126 	/* ec related updates */
   4127 	wm_set_eee(sc);
   4128 
   4129 out:
   4130 	if (needreset)
   4131 		rc = ENETRESET;
   4132 	mutex_exit(sc->sc_core_lock);
   4133 
   4134 	return rc;
   4135 }
   4136 
   4137 static bool
   4138 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   4139 {
   4140 
   4141 	switch (sc->sc_phytype) {
   4142 	case WMPHY_82577: /* ihphy */
   4143 	case WMPHY_82578: /* atphy */
   4144 	case WMPHY_82579: /* ihphy */
   4145 	case WMPHY_I217: /* ihphy */
   4146 	case WMPHY_82580: /* ihphy */
   4147 	case WMPHY_I350: /* ihphy */
   4148 		return true;
   4149 	default:
   4150 		return false;
   4151 	}
   4152 }
   4153 
   4154 static void
   4155 wm_set_linkdown_discard(struct wm_softc *sc)
   4156 {
   4157 
   4158 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4159 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4160 
   4161 		mutex_enter(txq->txq_lock);
   4162 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   4163 		mutex_exit(txq->txq_lock);
   4164 	}
   4165 }
   4166 
   4167 static void
   4168 wm_clear_linkdown_discard(struct wm_softc *sc)
   4169 {
   4170 
   4171 	for (int i = 0; i < sc->sc_nqueues; i++) {
   4172 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4173 
   4174 		mutex_enter(txq->txq_lock);
   4175 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   4176 		mutex_exit(txq->txq_lock);
   4177 	}
   4178 }
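
/*
 * Note on the two helpers above: for PHYs matched by
 * wm_phy_need_linkdown_discard(), frames queued for transmit while the
 * link is down are discarded, so each Tx queue carries a
 * WM_TXQ_LINKDOWN_DISCARD flag. wm_ioctl() below sets it when the media
 * is set to "none" or the interface is brought down;
 * wm_clear_linkdown_discard() removes it.
 */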
   4179 
   4180 /*
   4181  * wm_ioctl:		[ifnet interface function]
   4182  *
   4183  *	Handle control requests from the operator.
   4184  */
   4185 static int
   4186 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   4187 {
   4188 	struct wm_softc *sc = ifp->if_softc;
   4189 	struct ifreq *ifr = (struct ifreq *)data;
   4190 	struct ifaddr *ifa = (struct ifaddr *)data;
   4191 	struct sockaddr_dl *sdl;
   4192 	int error;
   4193 
   4194 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4195 		device_xname(sc->sc_dev), __func__));
   4196 
   4197 	switch (cmd) {
   4198 	case SIOCADDMULTI:
   4199 	case SIOCDELMULTI:
   4200 		break;
   4201 	default:
   4202 		KASSERT(IFNET_LOCKED(ifp));
   4203 	}
   4204 
   4205 	switch (cmd) {
   4206 	case SIOCSIFMEDIA:
   4207 		mutex_enter(sc->sc_core_lock);
   4208 		/* Flow control requires full-duplex mode. */
   4209 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4210 		    (ifr->ifr_media & IFM_FDX) == 0)
   4211 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4212 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4213 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4214 				/* We can do both TXPAUSE and RXPAUSE. */
   4215 				ifr->ifr_media |=
   4216 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4217 			}
   4218 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4219 		}
   4220 		mutex_exit(sc->sc_core_lock);
   4221 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4222 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4223 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4224 				DPRINTF(sc, WM_DEBUG_LINK,
   4225 				    ("%s: %s: Set linkdown discard flag\n",
   4226 					device_xname(sc->sc_dev), __func__));
   4227 				wm_set_linkdown_discard(sc);
   4228 			}
   4229 		}
   4230 		break;
   4231 	case SIOCINITIFADDR:
   4232 		mutex_enter(sc->sc_core_lock);
   4233 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4234 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4235 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4236 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4237 			/* Unicast address is the first multicast entry */
   4238 			wm_set_filter(sc);
   4239 			error = 0;
   4240 			mutex_exit(sc->sc_core_lock);
   4241 			break;
   4242 		}
   4243 		mutex_exit(sc->sc_core_lock);
   4244 		/*FALLTHROUGH*/
   4245 	default:
   4246 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4247 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4248 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4249 				DPRINTF(sc, WM_DEBUG_LINK,
   4250 				    ("%s: %s: Set linkdown discard flag\n",
   4251 					device_xname(sc->sc_dev), __func__));
   4252 				wm_set_linkdown_discard(sc);
   4253 			}
   4254 		}
   4255 		const int s = splnet();
   4256 		/* It may call wm_start, so unlock here */
   4257 		error = ether_ioctl(ifp, cmd, data);
   4258 		splx(s);
   4259 		if (error != ENETRESET)
   4260 			break;
   4261 
   4262 		error = 0;
   4263 
   4264 		if (cmd == SIOCSIFCAP)
   4265 			error = if_init(ifp);
   4266 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4267 			mutex_enter(sc->sc_core_lock);
   4268 			if (sc->sc_if_flags & IFF_RUNNING) {
   4269 				/*
   4270 				 * Multicast list has changed; set the
   4271 				 * hardware filter accordingly.
   4272 				 */
   4273 				wm_set_filter(sc);
   4274 			}
   4275 			mutex_exit(sc->sc_core_lock);
   4276 		}
   4277 		break;
   4278 	}
   4279 
   4280 	return error;
   4281 }
   4282 
   4283 /* MAC address related */
   4284 
    4285 /*
    4286  * Get the offset of the alternative MAC address and return it.
    4287  * Return 0 if an error occurs or no valid alternative address is found.
    4288  */
   4289 static uint16_t
   4290 wm_check_alt_mac_addr(struct wm_softc *sc)
   4291 {
   4292 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4293 	uint16_t offset = NVM_OFF_MACADDR;
   4294 
   4295 	/* Try to read alternative MAC address pointer */
   4296 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4297 		return 0;
   4298 
    4299 	/* Check whether the pointer is valid. */
   4300 	if ((offset == 0x0000) || (offset == 0xffff))
   4301 		return 0;
   4302 
   4303 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    4304 	/*
    4305 	 * Check whether the alternative MAC address is valid. Some cards
    4306 	 * have a non-0xffff pointer but don't actually use an alternative
    4307 	 * MAC address.
    4308 	 *
    4309 	 * The broadcast bit (the LSB of the first octet) must be clear.
    4310 	 */
   4311 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4312 		if (((myea[0] & 0xff) & 0x01) == 0)
   4313 			return offset; /* Found */
   4314 
   4315 	/* Not found */
   4316 	return 0;
   4317 }
   4318 
   4319 static int
   4320 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4321 {
   4322 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4323 	uint16_t offset = NVM_OFF_MACADDR;
   4324 	int do_invert = 0;
   4325 
   4326 	switch (sc->sc_type) {
   4327 	case WM_T_82580:
   4328 	case WM_T_I350:
   4329 	case WM_T_I354:
   4330 		/* EEPROM Top Level Partitioning */
   4331 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4332 		break;
   4333 	case WM_T_82571:
   4334 	case WM_T_82575:
   4335 	case WM_T_82576:
   4336 	case WM_T_80003:
   4337 	case WM_T_I210:
   4338 	case WM_T_I211:
   4339 		offset = wm_check_alt_mac_addr(sc);
   4340 		if (offset == 0)
   4341 			if ((sc->sc_funcid & 0x01) == 1)
   4342 				do_invert = 1;
   4343 		break;
   4344 	default:
   4345 		if ((sc->sc_funcid & 0x01) == 1)
   4346 			do_invert = 1;
   4347 		break;
   4348 	}
   4349 
   4350 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4351 		goto bad;
   4352 
   4353 	enaddr[0] = myea[0] & 0xff;
   4354 	enaddr[1] = myea[0] >> 8;
   4355 	enaddr[2] = myea[1] & 0xff;
   4356 	enaddr[3] = myea[1] >> 8;
   4357 	enaddr[4] = myea[2] & 0xff;
   4358 	enaddr[5] = myea[2] >> 8;
   4359 
   4360 	/*
   4361 	 * Toggle the LSB of the MAC address on the second port
   4362 	 * of some dual port cards.
   4363 	 */
   4364 	if (do_invert != 0)
   4365 		enaddr[5] ^= 1;
   4366 
   4367 	return 0;
   4368 
   4369 bad:
   4370 	return -1;
   4371 }
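
/*
 * Illustrative example of the unpacking above (values hypothetical):
 * the NVM stores the station address as three little-endian 16-bit
 * words, so myea[] = { 0x1100, 0x3322, 0x5544 } yields 00:11:22:33:44:55.
 */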
   4372 
   4373 /*
   4374  * wm_set_ral:
   4375  *
    4376  *	Set an entry in the receive address list.
   4377  */
   4378 static void
   4379 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4380 {
   4381 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4382 	uint32_t wlock_mac;
   4383 	int rv;
   4384 
   4385 	if (enaddr != NULL) {
   4386 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4387 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4388 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4389 		ral_hi |= RAL_AV;
   4390 	} else {
   4391 		ral_lo = 0;
   4392 		ral_hi = 0;
   4393 	}
   4394 
   4395 	switch (sc->sc_type) {
   4396 	case WM_T_82542_2_0:
   4397 	case WM_T_82542_2_1:
   4398 	case WM_T_82543:
   4399 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4400 		CSR_WRITE_FLUSH(sc);
   4401 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4402 		CSR_WRITE_FLUSH(sc);
   4403 		break;
   4404 	case WM_T_PCH2:
   4405 	case WM_T_PCH_LPT:
   4406 	case WM_T_PCH_SPT:
   4407 	case WM_T_PCH_CNP:
   4408 		if (idx == 0) {
   4409 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4410 			CSR_WRITE_FLUSH(sc);
   4411 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4412 			CSR_WRITE_FLUSH(sc);
   4413 			return;
   4414 		}
   4415 		if (sc->sc_type != WM_T_PCH2) {
   4416 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4417 			    FWSM_WLOCK_MAC);
   4418 			addrl = WMREG_SHRAL(idx - 1);
   4419 			addrh = WMREG_SHRAH(idx - 1);
   4420 		} else {
   4421 			wlock_mac = 0;
   4422 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4423 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4424 		}
   4425 
   4426 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4427 			rv = wm_get_swflag_ich8lan(sc);
   4428 			if (rv != 0)
   4429 				return;
   4430 			CSR_WRITE(sc, addrl, ral_lo);
   4431 			CSR_WRITE_FLUSH(sc);
   4432 			CSR_WRITE(sc, addrh, ral_hi);
   4433 			CSR_WRITE_FLUSH(sc);
   4434 			wm_put_swflag_ich8lan(sc);
   4435 		}
   4436 
   4437 		break;
   4438 	default:
   4439 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4440 		CSR_WRITE_FLUSH(sc);
   4441 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4442 		CSR_WRITE_FLUSH(sc);
   4443 		break;
   4444 	}
   4445 }
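
/*
 * Illustrative example of the packing above (address hypothetical):
 * for 00:11:22:33:44:55, ral_lo = 0x33221100 and
 * ral_hi = 0x00005544 | RAL_AV.
 */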
   4446 
   4447 /*
   4448  * wm_mchash:
   4449  *
   4450  *	Compute the hash of the multicast address for the 4096-bit
   4451  *	multicast filter.
   4452  */
   4453 static uint32_t
   4454 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4455 {
   4456 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4457 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4458 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4459 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4460 	uint32_t hash;
   4461 
   4462 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4463 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4464 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4465 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4466 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4467 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4468 		return (hash & 0x3ff);
   4469 	}
   4470 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4471 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4472 
   4473 	return (hash & 0xfff);
   4474 }
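
/*
 * Worked example (illustrative): for the IPv4 all-hosts group address
 * 01:00:5e:00:00:01 with sc_mchash_type == 0 on a non-ICH/PCH chip,
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010; wm_set_filter() below then
 * sets bit 16 (0x010 & 0x1f) in MTA[0] (0x010 >> 5).
 */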
   4475 
    4476 /*
    4477  * wm_rar_count:
    4478  *	Return the number of receive address (RAL/RAR) entries.
    4479  */
   4480 static int
   4481 wm_rar_count(struct wm_softc *sc)
   4482 {
   4483 	int size;
   4484 
   4485 	switch (sc->sc_type) {
   4486 	case WM_T_ICH8:
    4487 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4488 		break;
   4489 	case WM_T_ICH9:
   4490 	case WM_T_ICH10:
   4491 	case WM_T_PCH:
   4492 		size = WM_RAL_TABSIZE_ICH8;
   4493 		break;
   4494 	case WM_T_PCH2:
   4495 		size = WM_RAL_TABSIZE_PCH2;
   4496 		break;
   4497 	case WM_T_PCH_LPT:
   4498 	case WM_T_PCH_SPT:
   4499 	case WM_T_PCH_CNP:
   4500 		size = WM_RAL_TABSIZE_PCH_LPT;
   4501 		break;
   4502 	case WM_T_82575:
   4503 	case WM_T_I210:
   4504 	case WM_T_I211:
   4505 		size = WM_RAL_TABSIZE_82575;
   4506 		break;
   4507 	case WM_T_82576:
   4508 	case WM_T_82580:
   4509 		size = WM_RAL_TABSIZE_82576;
   4510 		break;
   4511 	case WM_T_I350:
   4512 	case WM_T_I354:
   4513 		size = WM_RAL_TABSIZE_I350;
   4514 		break;
   4515 	default:
   4516 		size = WM_RAL_TABSIZE;
   4517 	}
   4518 
   4519 	return size;
   4520 }
   4521 
   4522 /*
   4523  * wm_set_filter:
   4524  *
   4525  *	Set up the receive filter.
   4526  */
   4527 static void
   4528 wm_set_filter(struct wm_softc *sc)
   4529 {
   4530 	struct ethercom *ec = &sc->sc_ethercom;
   4531 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4532 	struct ether_multi *enm;
   4533 	struct ether_multistep step;
   4534 	bus_addr_t mta_reg;
   4535 	uint32_t hash, reg, bit;
   4536 	int i, size, ralmax, rv;
   4537 
   4538 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4539 		device_xname(sc->sc_dev), __func__));
   4540 	KASSERT(mutex_owned(sc->sc_core_lock));
   4541 
   4542 	if (sc->sc_type >= WM_T_82544)
   4543 		mta_reg = WMREG_CORDOVA_MTA;
   4544 	else
   4545 		mta_reg = WMREG_MTA;
   4546 
   4547 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4548 
   4549 	if (sc->sc_if_flags & IFF_BROADCAST)
   4550 		sc->sc_rctl |= RCTL_BAM;
   4551 	if (sc->sc_if_flags & IFF_PROMISC) {
   4552 		sc->sc_rctl |= RCTL_UPE;
   4553 		ETHER_LOCK(ec);
   4554 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4555 		ETHER_UNLOCK(ec);
   4556 		goto allmulti;
   4557 	}
   4558 
   4559 	/*
   4560 	 * Set the station address in the first RAL slot, and
   4561 	 * clear the remaining slots.
   4562 	 */
   4563 	size = wm_rar_count(sc);
   4564 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4565 
   4566 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4567 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4568 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4569 		switch (i) {
   4570 		case 0:
   4571 			/* We can use all entries */
   4572 			ralmax = size;
   4573 			break;
   4574 		case 1:
   4575 			/* Only RAR[0] */
   4576 			ralmax = 1;
   4577 			break;
   4578 		default:
   4579 			/* Available SHRA + RAR[0] */
   4580 			ralmax = i + 1;
   4581 		}
   4582 	} else
   4583 		ralmax = size;
   4584 	for (i = 1; i < size; i++) {
   4585 		if (i < ralmax)
   4586 			wm_set_ral(sc, NULL, i);
   4587 	}
   4588 
   4589 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4590 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4591 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4592 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4593 		size = WM_ICH8_MC_TABSIZE;
   4594 	else
   4595 		size = WM_MC_TABSIZE;
   4596 	/* Clear out the multicast table. */
   4597 	for (i = 0; i < size; i++) {
   4598 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4599 		CSR_WRITE_FLUSH(sc);
   4600 	}
   4601 
   4602 	ETHER_LOCK(ec);
   4603 	ETHER_FIRST_MULTI(step, ec, enm);
   4604 	while (enm != NULL) {
   4605 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4606 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4607 			ETHER_UNLOCK(ec);
   4608 			/*
   4609 			 * We must listen to a range of multicast addresses.
   4610 			 * For now, just accept all multicasts, rather than
   4611 			 * trying to set only those filter bits needed to match
   4612 			 * the range.  (At this time, the only use of address
   4613 			 * ranges is for IP multicast routing, for which the
   4614 			 * range is big enough to require all bits set.)
   4615 			 */
   4616 			goto allmulti;
   4617 		}
   4618 
   4619 		hash = wm_mchash(sc, enm->enm_addrlo);
   4620 
   4621 		reg = (hash >> 5);
   4622 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4623 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4624 		    || (sc->sc_type == WM_T_PCH2)
   4625 		    || (sc->sc_type == WM_T_PCH_LPT)
   4626 		    || (sc->sc_type == WM_T_PCH_SPT)
   4627 		    || (sc->sc_type == WM_T_PCH_CNP))
   4628 			reg &= 0x1f;
   4629 		else
   4630 			reg &= 0x7f;
   4631 		bit = hash & 0x1f;
   4632 
   4633 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4634 		hash |= 1U << bit;
   4635 
   4636 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4637 			/*
   4638 			 * 82544 Errata 9: Certain register cannot be written
   4639 			 * with particular alignments in PCI-X bus operation
   4640 			 * (FCAH, MTA and VFTA).
   4641 			 */
   4642 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4643 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4644 			CSR_WRITE_FLUSH(sc);
   4645 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4646 			CSR_WRITE_FLUSH(sc);
   4647 		} else {
   4648 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4649 			CSR_WRITE_FLUSH(sc);
   4650 		}
   4651 
   4652 		ETHER_NEXT_MULTI(step, enm);
   4653 	}
   4654 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4655 	ETHER_UNLOCK(ec);
   4656 
   4657 	goto setit;
   4658 
   4659 allmulti:
   4660 	sc->sc_rctl |= RCTL_MPE;
   4661 
   4662 setit:
   4663 	if (sc->sc_type >= WM_T_PCH2) {
   4664 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4665 		    && (ifp->if_mtu > ETHERMTU))
   4666 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4667 		else
   4668 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4669 		if (rv != 0)
   4670 			device_printf(sc->sc_dev,
   4671 			    "Failed to do workaround for jumbo frame.\n");
   4672 	}
   4673 
   4674 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4675 }
   4676 
   4677 /* Reset and init related */
   4678 
   4679 static void
   4680 wm_set_vlan(struct wm_softc *sc)
   4681 {
   4682 
   4683 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4684 		device_xname(sc->sc_dev), __func__));
   4685 
   4686 	/* Deal with VLAN enables. */
   4687 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4688 		sc->sc_ctrl |= CTRL_VME;
   4689 	else
   4690 		sc->sc_ctrl &= ~CTRL_VME;
   4691 
   4692 	/* Write the control registers. */
   4693 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4694 }
   4695 
   4696 static void
   4697 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4698 {
   4699 	uint32_t gcr;
   4700 	pcireg_t ctrl2;
   4701 
   4702 	gcr = CSR_READ(sc, WMREG_GCR);
   4703 
   4704 	/* Only take action if timeout value is defaulted to 0 */
   4705 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4706 		goto out;
   4707 
   4708 	if ((gcr & GCR_CAP_VER2) == 0) {
   4709 		gcr |= GCR_CMPL_TMOUT_10MS;
   4710 		goto out;
   4711 	}
   4712 
   4713 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4714 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4715 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4716 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4717 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4718 
   4719 out:
   4720 	/* Disable completion timeout resend */
   4721 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4722 
   4723 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4724 }
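
/*
 * Note on the flow above: if the completion timeout is already nonzero
 * it is left as-is; otherwise pre-version-2 PCIe capability devices get
 * the 10ms encoding in GCR while version-2 devices get a 16ms value via
 * PCIE_DCSR2 in config space. Every path falls through to "out", which
 * disables completion timeout resend before writing GCR back.
 */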
   4725 
   4726 void
   4727 wm_get_auto_rd_done(struct wm_softc *sc)
   4728 {
   4729 	int i;
   4730 
    4731 	/* Wait for eeprom to reload */
   4732 	switch (sc->sc_type) {
   4733 	case WM_T_82571:
   4734 	case WM_T_82572:
   4735 	case WM_T_82573:
   4736 	case WM_T_82574:
   4737 	case WM_T_82583:
   4738 	case WM_T_82575:
   4739 	case WM_T_82576:
   4740 	case WM_T_82580:
   4741 	case WM_T_I350:
   4742 	case WM_T_I354:
   4743 	case WM_T_I210:
   4744 	case WM_T_I211:
   4745 	case WM_T_80003:
   4746 	case WM_T_ICH8:
   4747 	case WM_T_ICH9:
   4748 		for (i = 0; i < 10; i++) {
   4749 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4750 				break;
   4751 			delay(1000);
   4752 		}
   4753 		if (i == 10) {
   4754 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4755 			    "complete\n", device_xname(sc->sc_dev));
   4756 		}
   4757 		break;
   4758 	default:
   4759 		break;
   4760 	}
   4761 }
   4762 
   4763 void
   4764 wm_lan_init_done(struct wm_softc *sc)
   4765 {
   4766 	uint32_t reg = 0;
   4767 	int i;
   4768 
   4769 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4770 		device_xname(sc->sc_dev), __func__));
   4771 
   4772 	/* Wait for eeprom to reload */
   4773 	switch (sc->sc_type) {
   4774 	case WM_T_ICH10:
   4775 	case WM_T_PCH:
   4776 	case WM_T_PCH2:
   4777 	case WM_T_PCH_LPT:
   4778 	case WM_T_PCH_SPT:
   4779 	case WM_T_PCH_CNP:
   4780 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4781 			reg = CSR_READ(sc, WMREG_STATUS);
   4782 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4783 				break;
   4784 			delay(100);
   4785 		}
   4786 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4787 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4788 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4789 		}
   4790 		break;
   4791 	default:
   4792 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4793 		    __func__);
   4794 		break;
   4795 	}
   4796 
   4797 	reg &= ~STATUS_LAN_INIT_DONE;
   4798 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4799 }
   4800 
   4801 void
   4802 wm_get_cfg_done(struct wm_softc *sc)
   4803 {
   4804 	int mask;
   4805 	uint32_t reg;
   4806 	int i;
   4807 
   4808 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4809 		device_xname(sc->sc_dev), __func__));
   4810 
   4811 	/* Wait for eeprom to reload */
   4812 	switch (sc->sc_type) {
   4813 	case WM_T_82542_2_0:
   4814 	case WM_T_82542_2_1:
   4815 		/* null */
   4816 		break;
   4817 	case WM_T_82543:
   4818 	case WM_T_82544:
   4819 	case WM_T_82540:
   4820 	case WM_T_82545:
   4821 	case WM_T_82545_3:
   4822 	case WM_T_82546:
   4823 	case WM_T_82546_3:
   4824 	case WM_T_82541:
   4825 	case WM_T_82541_2:
   4826 	case WM_T_82547:
   4827 	case WM_T_82547_2:
   4828 	case WM_T_82573:
   4829 	case WM_T_82574:
   4830 	case WM_T_82583:
   4831 		/* generic */
   4832 		delay(10*1000);
   4833 		break;
   4834 	case WM_T_80003:
   4835 	case WM_T_82571:
   4836 	case WM_T_82572:
   4837 	case WM_T_82575:
   4838 	case WM_T_82576:
   4839 	case WM_T_82580:
   4840 	case WM_T_I350:
   4841 	case WM_T_I354:
   4842 	case WM_T_I210:
   4843 	case WM_T_I211:
   4844 		if (sc->sc_type == WM_T_82571) {
   4845 			/* Only 82571 shares port 0 */
   4846 			mask = EEMNGCTL_CFGDONE_0;
   4847 		} else
   4848 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4849 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4850 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4851 				break;
   4852 			delay(1000);
   4853 		}
   4854 		if (i >= WM_PHY_CFG_TIMEOUT)
   4855 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4856 				device_xname(sc->sc_dev), __func__));
   4857 		break;
   4858 	case WM_T_ICH8:
   4859 	case WM_T_ICH9:
   4860 	case WM_T_ICH10:
   4861 	case WM_T_PCH:
   4862 	case WM_T_PCH2:
   4863 	case WM_T_PCH_LPT:
   4864 	case WM_T_PCH_SPT:
   4865 	case WM_T_PCH_CNP:
   4866 		delay(10*1000);
   4867 		if (sc->sc_type >= WM_T_ICH10)
   4868 			wm_lan_init_done(sc);
   4869 		else
   4870 			wm_get_auto_rd_done(sc);
   4871 
   4872 		/* Clear PHY Reset Asserted bit */
   4873 		reg = CSR_READ(sc, WMREG_STATUS);
   4874 		if ((reg & STATUS_PHYRA) != 0)
   4875 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4876 		break;
   4877 	default:
   4878 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4879 		    __func__);
   4880 		break;
   4881 	}
   4882 }
   4883 
   4884 int
   4885 wm_phy_post_reset(struct wm_softc *sc)
   4886 {
   4887 	device_t dev = sc->sc_dev;
   4888 	uint16_t reg;
   4889 	int rv = 0;
   4890 
   4891 	/* This function is only for ICH8 and newer. */
   4892 	if (sc->sc_type < WM_T_ICH8)
   4893 		return 0;
   4894 
   4895 	if (wm_phy_resetisblocked(sc)) {
   4896 		/* XXX */
   4897 		device_printf(dev, "PHY is blocked\n");
   4898 		return -1;
   4899 	}
   4900 
   4901 	/* Allow time for h/w to get to quiescent state after reset */
   4902 	delay(10*1000);
   4903 
   4904 	/* Perform any necessary post-reset workarounds */
   4905 	if (sc->sc_type == WM_T_PCH)
   4906 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4907 	else if (sc->sc_type == WM_T_PCH2)
   4908 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4909 	if (rv != 0)
   4910 		return rv;
   4911 
   4912 	/* Clear the host wakeup bit after lcd reset */
   4913 	if (sc->sc_type >= WM_T_PCH) {
   4914 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4915 		reg &= ~BM_WUC_HOST_WU_BIT;
   4916 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4917 	}
   4918 
   4919 	/* Configure the LCD with the extended configuration region in NVM */
   4920 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4921 		return rv;
   4922 
   4923 	/* Configure the LCD with the OEM bits in NVM */
   4924 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4925 
   4926 	if (sc->sc_type == WM_T_PCH2) {
   4927 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4928 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4929 			delay(10 * 1000);
   4930 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4931 		}
   4932 		/* Set EEE LPI Update Timer to 200usec */
   4933 		rv = sc->phy.acquire(sc);
   4934 		if (rv)
   4935 			return rv;
   4936 		rv = wm_write_emi_reg_locked(dev,
   4937 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4938 		sc->phy.release(sc);
   4939 	}
   4940 
   4941 	return rv;
   4942 }
   4943 
   4944 /* Only for PCH and newer */
   4945 static int
   4946 wm_write_smbus_addr(struct wm_softc *sc)
   4947 {
   4948 	uint32_t strap, freq;
   4949 	uint16_t phy_data;
   4950 	int rv;
   4951 
   4952 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4953 		device_xname(sc->sc_dev), __func__));
   4954 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4955 
   4956 	strap = CSR_READ(sc, WMREG_STRAP);
   4957 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4958 
   4959 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4960 	if (rv != 0)
   4961 		return rv;
   4962 
   4963 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4964 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4965 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4966 
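	/*
	 * As implemented below: a strap frequency field of 0 means the
	 * SMBus frequency is unsupported; otherwise (freq - 1) supplies
	 * the FREQ_LOW (bit 0) and FREQ_HIGH (bit 1) bits of HV_SMB_ADDR.
	 */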
   4967 	if (sc->sc_phytype == WMPHY_I217) {
   4968 		/* Restore SMBus frequency */
    4969 		if (freq--) {
   4970 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4971 			    | HV_SMB_ADDR_FREQ_HIGH);
   4972 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4973 			    HV_SMB_ADDR_FREQ_LOW);
   4974 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4975 			    HV_SMB_ADDR_FREQ_HIGH);
   4976 		} else
   4977 			DPRINTF(sc, WM_DEBUG_INIT,
   4978 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4979 				device_xname(sc->sc_dev), __func__));
   4980 	}
   4981 
   4982 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4983 	    phy_data);
   4984 }
   4985 
   4986 static int
   4987 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4988 {
   4989 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4990 	uint16_t phy_page = 0;
   4991 	int rv = 0;
   4992 
   4993 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4994 		device_xname(sc->sc_dev), __func__));
   4995 
   4996 	switch (sc->sc_type) {
   4997 	case WM_T_ICH8:
   4998 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4999 		    || (sc->sc_phytype != WMPHY_IGP_3))
   5000 			return 0;
   5001 
   5002 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   5003 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   5004 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   5005 			break;
   5006 		}
   5007 		/* FALLTHROUGH */
   5008 	case WM_T_PCH:
   5009 	case WM_T_PCH2:
   5010 	case WM_T_PCH_LPT:
   5011 	case WM_T_PCH_SPT:
   5012 	case WM_T_PCH_CNP:
   5013 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   5014 		break;
   5015 	default:
   5016 		return 0;
   5017 	}
   5018 
   5019 	if ((rv = sc->phy.acquire(sc)) != 0)
   5020 		return rv;
   5021 
   5022 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   5023 	if ((reg & sw_cfg_mask) == 0)
   5024 		goto release;
   5025 
   5026 	/*
   5027 	 * Make sure HW does not configure LCD from PHY extended configuration
   5028 	 * before SW configuration
   5029 	 */
   5030 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   5031 	if ((sc->sc_type < WM_T_PCH2)
   5032 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   5033 		goto release;
   5034 
   5035 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   5036 		device_xname(sc->sc_dev), __func__));
   5037 	/* word_addr is in DWORD */
   5038 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   5039 
   5040 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   5041 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   5042 	if (cnf_size == 0)
   5043 		goto release;
   5044 
   5045 	if (((sc->sc_type == WM_T_PCH)
   5046 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   5047 	    || (sc->sc_type > WM_T_PCH)) {
   5048 		/*
   5049 		 * HW configures the SMBus address and LEDs when the OEM and
   5050 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   5051 		 * are cleared, SW will configure them instead.
   5052 		 */
   5053 		DPRINTF(sc, WM_DEBUG_INIT,
   5054 		    ("%s: %s: Configure SMBus and LED\n",
   5055 			device_xname(sc->sc_dev), __func__));
   5056 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   5057 			goto release;
   5058 
   5059 		reg = CSR_READ(sc, WMREG_LEDCTL);
   5060 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   5061 		    (uint16_t)reg);
   5062 		if (rv != 0)
   5063 			goto release;
   5064 	}
   5065 
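	/*
	 * Layout note (as consumed by the loop below): the extended
	 * configuration region is an array of 16-bit word pairs, data word
	 * first, PHY register address second. A pair addressing
	 * IGPHY_PAGE_SELECT updates the page applied to later addresses.
	 */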
   5066 	/* Configure LCD from extended configuration region. */
   5067 	for (i = 0; i < cnf_size; i++) {
   5068 		uint16_t reg_data, reg_addr;
   5069 
   5070 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   5071 			goto release;
   5072 
   5073 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   5074 			goto release;
   5075 
   5076 		if (reg_addr == IGPHY_PAGE_SELECT)
   5077 			phy_page = reg_data;
   5078 
   5079 		reg_addr &= IGPHY_MAXREGADDR;
   5080 		reg_addr |= phy_page;
   5081 
   5082 		KASSERT(sc->phy.writereg_locked != NULL);
   5083 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   5084 		    reg_data);
   5085 	}
   5086 
   5087 release:
   5088 	sc->phy.release(sc);
   5089 	return rv;
   5090 }
   5091 
   5092 /*
   5093  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   5094  *  @sc:       pointer to the HW structure
   5095  *  @d0_state: boolean if entering d0 or d3 device state
   5096  *
   5097  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   5098  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   5099  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   5100  */
   5101 int
   5102 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   5103 {
   5104 	uint32_t mac_reg;
   5105 	uint16_t oem_reg;
   5106 	int rv;
   5107 
   5108 	if (sc->sc_type < WM_T_PCH)
   5109 		return 0;
   5110 
   5111 	rv = sc->phy.acquire(sc);
   5112 	if (rv != 0)
   5113 		return rv;
   5114 
   5115 	if (sc->sc_type == WM_T_PCH) {
   5116 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   5117 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   5118 			goto release;
   5119 	}
   5120 
   5121 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   5122 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   5123 		goto release;
   5124 
   5125 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   5126 
   5127 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   5128 	if (rv != 0)
   5129 		goto release;
   5130 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   5131 
   5132 	if (d0_state) {
   5133 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   5134 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5135 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   5136 			oem_reg |= HV_OEM_BITS_LPLU;
   5137 	} else {
   5138 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   5139 		    != 0)
   5140 			oem_reg |= HV_OEM_BITS_A1KDIS;
   5141 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   5142 		    != 0)
   5143 			oem_reg |= HV_OEM_BITS_LPLU;
   5144 	}
   5145 
   5146 	/* Set Restart auto-neg to activate the bits */
   5147 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   5148 	    && (wm_phy_resetisblocked(sc) == false))
   5149 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   5150 
   5151 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   5152 
   5153 release:
   5154 	sc->phy.release(sc);
   5155 
   5156 	return rv;
   5157 }
   5158 
   5159 /* Init hardware bits */
   5160 void
   5161 wm_initialize_hardware_bits(struct wm_softc *sc)
   5162 {
   5163 	uint32_t tarc0, tarc1, reg;
   5164 
   5165 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5166 		device_xname(sc->sc_dev), __func__));
   5167 
   5168 	/* For 82571 variant, 80003 and ICHs */
   5169 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   5170 	    || WM_IS_ICHPCH(sc)) {
   5171 
   5172 		/* Transmit Descriptor Control 0 */
   5173 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   5174 		reg |= TXDCTL_COUNT_DESC;
   5175 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   5176 
   5177 		/* Transmit Descriptor Control 1 */
   5178 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   5179 		reg |= TXDCTL_COUNT_DESC;
   5180 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   5181 
   5182 		/* TARC0 */
   5183 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   5184 		switch (sc->sc_type) {
   5185 		case WM_T_82571:
   5186 		case WM_T_82572:
   5187 		case WM_T_82573:
   5188 		case WM_T_82574:
   5189 		case WM_T_82583:
   5190 		case WM_T_80003:
   5191 			/* Clear bits 30..27 */
   5192 			tarc0 &= ~__BITS(30, 27);
   5193 			break;
   5194 		default:
   5195 			break;
   5196 		}
   5197 
   5198 		switch (sc->sc_type) {
   5199 		case WM_T_82571:
   5200 		case WM_T_82572:
   5201 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   5202 
   5203 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5204 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5205 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5206 			/* 8257[12] Errata No.7 */
    5207 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5208 
   5209 			/* TARC1 bit 28 */
   5210 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5211 				tarc1 &= ~__BIT(28);
   5212 			else
   5213 				tarc1 |= __BIT(28);
   5214 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5215 
   5216 			/*
   5217 			 * 8257[12] Errata No.13
    5218 			 * Disable Dynamic Clock Gating.
   5219 			 */
   5220 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5221 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5222 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5223 			break;
   5224 		case WM_T_82573:
   5225 		case WM_T_82574:
   5226 		case WM_T_82583:
   5227 			if ((sc->sc_type == WM_T_82574)
   5228 			    || (sc->sc_type == WM_T_82583))
   5229 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5230 
   5231 			/* Extended Device Control */
   5232 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5233 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5234 			reg |= __BIT(22);	/* Set bit 22 */
   5235 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5236 
   5237 			/* Device Control */
   5238 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5239 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5240 
   5241 			/* PCIe Control Register */
   5242 			/*
   5243 			 * 82573 Errata (unknown).
   5244 			 *
   5245 			 * 82574 Errata 25 and 82583 Errata 12
   5246 			 * "Dropped Rx Packets":
    5247 			 *   NVM image version 2.1.4 and newer does not have this bug.
   5248 			 */
   5249 			reg = CSR_READ(sc, WMREG_GCR);
   5250 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5251 			CSR_WRITE(sc, WMREG_GCR, reg);
   5252 
   5253 			if ((sc->sc_type == WM_T_82574)
   5254 			    || (sc->sc_type == WM_T_82583)) {
   5255 				/*
   5256 				 * Document says this bit must be set for
   5257 				 * proper operation.
   5258 				 */
   5259 				reg = CSR_READ(sc, WMREG_GCR);
   5260 				reg |= __BIT(22);
   5261 				CSR_WRITE(sc, WMREG_GCR, reg);
   5262 
    5263 				/*
    5264 				 * Apply a workaround for a hardware erratum
    5265 				 * documented in the errata docs. It fixes an
    5266 				 * issue where some error-prone or unreliable
    5267 				 * PCIe completions occur, particularly with
    5268 				 * ASPM enabled. Without the fix, the issue
    5269 				 * can cause Tx timeouts.
    5270 				 */
   5271 				reg = CSR_READ(sc, WMREG_GCR2);
   5272 				reg |= __BIT(0);
   5273 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5274 			}
   5275 			break;
   5276 		case WM_T_80003:
   5277 			/* TARC0 */
   5278 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5279 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5280 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5281 
   5282 			/* TARC1 bit 28 */
   5283 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5284 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5285 				tarc1 &= ~__BIT(28);
   5286 			else
   5287 				tarc1 |= __BIT(28);
   5288 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5289 			break;
   5290 		case WM_T_ICH8:
   5291 		case WM_T_ICH9:
   5292 		case WM_T_ICH10:
   5293 		case WM_T_PCH:
   5294 		case WM_T_PCH2:
   5295 		case WM_T_PCH_LPT:
   5296 		case WM_T_PCH_SPT:
   5297 		case WM_T_PCH_CNP:
   5298 			/* TARC0 */
   5299 			if (sc->sc_type == WM_T_ICH8) {
   5300 				/* Set TARC0 bits 29 and 28 */
   5301 				tarc0 |= __BITS(29, 28);
   5302 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5303 				tarc0 |= __BIT(29);
   5304 				/*
   5305 				 *  Drop bit 28. From Linux.
   5306 				 * See I218/I219 spec update
   5307 				 * "5. Buffer Overrun While the I219 is
   5308 				 * Processing DMA Transactions"
   5309 				 */
   5310 				tarc0 &= ~__BIT(28);
   5311 			}
   5312 			/* Set TARC0 bits 23,24,26,27 */
   5313 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5314 
   5315 			/* CTRL_EXT */
   5316 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5317 			reg |= __BIT(22);	/* Set bit 22 */
   5318 			/*
   5319 			 * Enable PHY low-power state when MAC is at D3
   5320 			 * w/o WoL
   5321 			 */
   5322 			if (sc->sc_type >= WM_T_PCH)
   5323 				reg |= CTRL_EXT_PHYPDEN;
   5324 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5325 
   5326 			/* TARC1 */
   5327 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5328 			/* bit 28 */
   5329 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5330 				tarc1 &= ~__BIT(28);
   5331 			else
   5332 				tarc1 |= __BIT(28);
   5333 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5334 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5335 
   5336 			/* Device Status */
   5337 			if (sc->sc_type == WM_T_ICH8) {
   5338 				reg = CSR_READ(sc, WMREG_STATUS);
   5339 				reg &= ~__BIT(31);
   5340 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5341 
   5342 			}
   5343 
   5344 			/* IOSFPC */
   5345 			if (sc->sc_type == WM_T_PCH_SPT) {
   5346 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5347 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5348 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5349 			}
    5350 			/*
    5351 			 * To work around a descriptor data corruption issue
    5352 			 * during NFS v2 UDP traffic, just disable the NFS
    5353 			 * filtering capability.
    5354 			 */
   5355 			reg = CSR_READ(sc, WMREG_RFCTL);
   5356 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5357 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5358 			break;
   5359 		default:
   5360 			break;
   5361 		}
   5362 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5363 
   5364 		switch (sc->sc_type) {
   5365 		case WM_T_82571:
   5366 		case WM_T_82572:
   5367 		case WM_T_82573:
   5368 		case WM_T_80003:
   5369 		case WM_T_ICH8:
   5370 			/*
   5371 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
   5372 			 * others to avoid RSS Hash Value bug.
   5373 			 */
   5374 			reg = CSR_READ(sc, WMREG_RFCTL);
   5375 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   5376 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5377 			break;
   5378 		case WM_T_82574:
    5379 			/* Use the extended Rx descriptor. */
   5380 			reg = CSR_READ(sc, WMREG_RFCTL);
   5381 			reg |= WMREG_RFCTL_EXSTEN;
   5382 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5383 			break;
   5384 		default:
   5385 			break;
   5386 		}
   5387 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5388 		/*
   5389 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5390 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5391 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5392 		 * Correctly by the Device"
   5393 		 *
   5394 		 * I354(C2000) Errata AVR53:
   5395 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5396 		 * Hang"
   5397 		 */
   5398 		reg = CSR_READ(sc, WMREG_RFCTL);
   5399 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5400 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5401 	}
   5402 }
   5403 
   5404 static uint32_t
   5405 wm_rxpbs_adjust_82580(uint32_t val)
   5406 {
   5407 	uint32_t rv = 0;
   5408 
   5409 	if (val < __arraycount(wm_82580_rxpbs_table))
   5410 		rv = wm_82580_rxpbs_table[val];
   5411 
   5412 	return rv;
   5413 }
   5414 
   5415 /*
   5416  * wm_reset_phy:
   5417  *
   5418  *	generic PHY reset function.
   5419  *	Same as e1000_phy_hw_reset_generic()
   5420  */
   5421 static int
   5422 wm_reset_phy(struct wm_softc *sc)
   5423 {
   5424 	uint32_t reg;
   5425 	int rv;
   5426 
   5427 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5428 		device_xname(sc->sc_dev), __func__));
   5429 	if (wm_phy_resetisblocked(sc))
   5430 		return -1;
   5431 
   5432 	rv = sc->phy.acquire(sc);
   5433 	if (rv) {
   5434 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
   5435 		    __func__, rv);
   5436 		return rv;
   5437 	}
   5438 
   5439 	reg = CSR_READ(sc, WMREG_CTRL);
   5440 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5441 	CSR_WRITE_FLUSH(sc);
   5442 
   5443 	delay(sc->phy.reset_delay_us);
   5444 
   5445 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5446 	CSR_WRITE_FLUSH(sc);
   5447 
   5448 	delay(150);
   5449 
   5450 	sc->phy.release(sc);
   5451 
   5452 	wm_get_cfg_done(sc);
   5453 	wm_phy_post_reset(sc);
   5454 
   5455 	return 0;
   5456 }
   5457 
   5458 /*
   5459  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5460  *
   5461  * In i219, the descriptor rings must be emptied before resetting the HW
   5462  * or before changing the device state to D3 during runtime (runtime PM).
   5463  *
   5464  * Failure to do this will cause the HW to enter a unit hang state which can
   5465  * only be released by PCI reset on the device.
   5466  *
   5467  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   5468  */
   5469 static void
   5470 wm_flush_desc_rings(struct wm_softc *sc)
   5471 {
   5472 	pcireg_t preg;
   5473 	uint32_t reg;
   5474 	struct wm_txqueue *txq;
   5475 	wiseman_txdesc_t *txd;
   5476 	int nexttx;
   5477 	uint32_t rctl;
   5478 
   5479 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5480 
   5481 	/* First, disable MULR fix in FEXTNVM11 */
   5482 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5483 	reg |= FEXTNVM11_DIS_MULRFIX;
   5484 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5485 
   5486 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5487 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5488 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5489 		return;
   5490 
   5491 	/*
   5492 	 * Remove all descriptors from the tx_ring.
   5493 	 *
    5494 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    5495 	 * happens when the HW reads the regs. We assign the ring itself as
    5496 	 * the data of the next descriptor; we don't care about the data since
    5497 	 * we are about to reset the HW.
   5498 	 */
   5499 #ifdef WM_DEBUG
   5500 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5501 #endif
   5502 	reg = CSR_READ(sc, WMREG_TCTL);
   5503 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5504 
   5505 	txq = &sc->sc_queue[0].wmq_txq;
   5506 	nexttx = txq->txq_next;
   5507 	txd = &txq->txq_descs[nexttx];
   5508 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   5509 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5510 	txd->wtx_fields.wtxu_status = 0;
   5511 	txd->wtx_fields.wtxu_options = 0;
   5512 	txd->wtx_fields.wtxu_vlan = 0;
   5513 
   5514 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5515 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5516 
   5517 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5518 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5519 	CSR_WRITE_FLUSH(sc);
   5520 	delay(250);
   5521 
   5522 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5523 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5524 		return;
   5525 
   5526 	/*
   5527 	 * Mark all descriptors in the RX ring as consumed and disable the
   5528 	 * rx ring.
   5529 	 */
   5530 #ifdef WM_DEBUG
   5531 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5532 #endif
   5533 	rctl = CSR_READ(sc, WMREG_RCTL);
   5534 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5535 	CSR_WRITE_FLUSH(sc);
   5536 	delay(150);
   5537 
   5538 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5539 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5540 	reg &= 0xffffc000;
   5541 	/*
   5542 	 * Update thresholds: prefetch threshold to 31, host threshold
   5543 	 * to 1 and make sure the granularity is "descriptors" and not
   5544 	 * "cache lines"
   5545 	 */
   5546 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5547 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5548 
   5549 	/* Momentarily enable the RX ring for the changes to take effect */
   5550 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5551 	CSR_WRITE_FLUSH(sc);
   5552 	delay(150);
   5553 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5554 }
   5555 
   5556 /*
   5557  * wm_reset:
   5558  *
   5559  *	Reset the i82542 chip.
   5560  */
   5561 static void
   5562 wm_reset(struct wm_softc *sc)
   5563 {
   5564 	int phy_reset = 0;
   5565 	int i, error = 0;
   5566 	uint32_t reg;
   5567 	uint16_t kmreg;
   5568 	int rv;
   5569 
   5570 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5571 		device_xname(sc->sc_dev), __func__));
   5572 	KASSERT(sc->sc_type != 0);
   5573 
   5574 	/*
   5575 	 * Allocate on-chip memory according to the MTU size.
   5576 	 * The Packet Buffer Allocation register must be written
   5577 	 * before the chip is reset.
   5578 	 */
   5579 	switch (sc->sc_type) {
   5580 	case WM_T_82547:
   5581 	case WM_T_82547_2:
   5582 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5583 		    PBA_22K : PBA_30K;
   5584 		for (i = 0; i < sc->sc_nqueues; i++) {
   5585 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5586 			txq->txq_fifo_head = 0;
   5587 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5588 			txq->txq_fifo_size =
   5589 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5590 			txq->txq_fifo_stall = 0;
   5591 		}
   5592 		break;
   5593 	case WM_T_82571:
   5594 	case WM_T_82572:
    5595 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5596 	case WM_T_80003:
   5597 		sc->sc_pba = PBA_32K;
   5598 		break;
   5599 	case WM_T_82573:
   5600 		sc->sc_pba = PBA_12K;
   5601 		break;
   5602 	case WM_T_82574:
   5603 	case WM_T_82583:
   5604 		sc->sc_pba = PBA_20K;
   5605 		break;
   5606 	case WM_T_82576:
   5607 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5608 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5609 		break;
   5610 	case WM_T_82580:
   5611 	case WM_T_I350:
   5612 	case WM_T_I354:
   5613 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5614 		break;
   5615 	case WM_T_I210:
   5616 	case WM_T_I211:
   5617 		sc->sc_pba = PBA_34K;
   5618 		break;
   5619 	case WM_T_ICH8:
   5620 		/* Workaround for a bit corruption issue in FIFO memory */
   5621 		sc->sc_pba = PBA_8K;
   5622 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5623 		break;
   5624 	case WM_T_ICH9:
   5625 	case WM_T_ICH10:
   5626 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5627 		    PBA_14K : PBA_10K;
   5628 		break;
   5629 	case WM_T_PCH:
   5630 	case WM_T_PCH2:	/* XXX 14K? */
   5631 	case WM_T_PCH_LPT:
   5632 	case WM_T_PCH_SPT:
   5633 	case WM_T_PCH_CNP:
   5634 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5635 		    PBA_12K : PBA_26K;
   5636 		break;
   5637 	default:
   5638 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5639 		    PBA_40K : PBA_48K;
   5640 		break;
   5641 	}
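	/*
	 * Illustrative note on the 82547 split above: sc_pba selects how
	 * much of the 40KB packet buffer goes to receive; the remaining
	 * (PBA_40K - sc_pba) KB become the Tx FIFO described by
	 * txq_fifo_addr/txq_fifo_size.
	 */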
   5642 	/*
   5643 	 * Only old or non-multiqueue devices have the PBA register
   5644 	 * XXX Need special handling for 82575.
   5645 	 */
   5646 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5647 	    || (sc->sc_type == WM_T_82575))
   5648 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5649 
   5650 	/* Prevent the PCI-E bus from sticking */
   5651 	if (sc->sc_flags & WM_F_PCIE) {
   5652 		int timeout = 800;
   5653 
   5654 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5655 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5656 
   5657 		while (timeout--) {
   5658 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5659 			    == 0)
   5660 				break;
   5661 			delay(100);
   5662 		}
   5663 		if (timeout == 0)
   5664 			device_printf(sc->sc_dev,
   5665 			    "failed to disable bus mastering\n");
   5666 	}
   5667 
   5668 	/* Set the completion timeout for interface */
   5669 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5670 	    || (sc->sc_type == WM_T_82580)
   5671 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5672 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5673 		wm_set_pcie_completion_timeout(sc);
   5674 
   5675 	/* Clear interrupt */
   5676 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5677 	if (wm_is_using_msix(sc)) {
   5678 		if (sc->sc_type != WM_T_82574) {
   5679 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5680 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5681 		} else
   5682 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5683 	}
   5684 
   5685 	/* Stop the transmit and receive processes. */
   5686 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5687 	sc->sc_rctl &= ~RCTL_EN;
   5688 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5689 	CSR_WRITE_FLUSH(sc);
   5690 
   5691 	/* XXX set_tbi_sbp_82543() */
   5692 
   5693 	delay(10*1000);
   5694 
   5695 	/* Must acquire the MDIO ownership before MAC reset */
   5696 	switch (sc->sc_type) {
   5697 	case WM_T_82573:
   5698 	case WM_T_82574:
   5699 	case WM_T_82583:
   5700 		error = wm_get_hw_semaphore_82573(sc);
   5701 		break;
   5702 	default:
   5703 		break;
   5704 	}
   5705 
   5706 	/*
   5707 	 * 82541 Errata 29? & 82547 Errata 28?
   5708 	 * See also the description about PHY_RST bit in CTRL register
   5709 	 * in 8254x_GBe_SDM.pdf.
   5710 	 */
   5711 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5712 		CSR_WRITE(sc, WMREG_CTRL,
   5713 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5714 		CSR_WRITE_FLUSH(sc);
   5715 		delay(5000);
   5716 	}
   5717 
   5718 	switch (sc->sc_type) {
   5719 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5720 	case WM_T_82541:
   5721 	case WM_T_82541_2:
   5722 	case WM_T_82547:
   5723 	case WM_T_82547_2:
   5724 		/*
   5725 		 * On some chipsets, a reset through a memory-mapped write
   5726 		 * cycle can cause the chip to reset before completing the
    5727 		 * write cycle. This causes a major headache that can be avoided
   5728 		 * by issuing the reset via indirect register writes through
   5729 		 * I/O space.
   5730 		 *
   5731 		 * So, if we successfully mapped the I/O BAR at attach time,
   5732 		 * use that. Otherwise, try our luck with a memory-mapped
   5733 		 * reset.
   5734 		 */
   5735 		if (sc->sc_flags & WM_F_IOH_VALID)
   5736 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5737 		else
   5738 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5739 		break;
   5740 	case WM_T_82545_3:
   5741 	case WM_T_82546_3:
   5742 		/* Use the shadow control register on these chips. */
   5743 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5744 		break;
   5745 	case WM_T_80003:
   5746 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5747 		if (sc->phy.acquire(sc) != 0)
   5748 			break;
   5749 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5750 		sc->phy.release(sc);
   5751 		break;
   5752 	case WM_T_ICH8:
   5753 	case WM_T_ICH9:
   5754 	case WM_T_ICH10:
   5755 	case WM_T_PCH:
   5756 	case WM_T_PCH2:
   5757 	case WM_T_PCH_LPT:
   5758 	case WM_T_PCH_SPT:
   5759 	case WM_T_PCH_CNP:
   5760 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5761 		if (wm_phy_resetisblocked(sc) == false) {
   5762 			/*
   5763 			 * Gate automatic PHY configuration by hardware on
   5764 			 * non-managed 82579
   5765 			 */
   5766 			if ((sc->sc_type == WM_T_PCH2)
   5767 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5768 				== 0))
   5769 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5770 
   5771 			reg |= CTRL_PHY_RESET;
   5772 			phy_reset = 1;
   5773 		} else
   5774 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5775 		if (sc->phy.acquire(sc) != 0)
   5776 			break;
   5777 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5778 		/* Don't insert a completion barrier when resetting */
   5779 		delay(20*1000);
   5780 		/*
   5781 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
   5782 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
   5783 		 * only. See also wm_get_swflag_ich8lan().
   5784 		 */
   5785 		mutex_exit(sc->sc_ich_phymtx);
   5786 		break;
   5787 	case WM_T_82580:
   5788 	case WM_T_I350:
   5789 	case WM_T_I354:
   5790 	case WM_T_I210:
   5791 	case WM_T_I211:
   5792 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5793 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5794 			CSR_WRITE_FLUSH(sc);
   5795 		delay(5000);
   5796 		break;
   5797 	case WM_T_82542_2_0:
   5798 	case WM_T_82542_2_1:
   5799 	case WM_T_82543:
   5800 	case WM_T_82540:
   5801 	case WM_T_82545:
   5802 	case WM_T_82546:
   5803 	case WM_T_82571:
   5804 	case WM_T_82572:
   5805 	case WM_T_82573:
   5806 	case WM_T_82574:
   5807 	case WM_T_82575:
   5808 	case WM_T_82576:
   5809 	case WM_T_82583:
   5810 	default:
   5811 		/* Everything else can safely use the documented method. */
   5812 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5813 		break;
   5814 	}
   5815 
   5816 	/* Must release the MDIO ownership after MAC reset */
   5817 	switch (sc->sc_type) {
   5818 	case WM_T_82573:
   5819 	case WM_T_82574:
   5820 	case WM_T_82583:
   5821 		if (error == 0)
   5822 			wm_put_hw_semaphore_82573(sc);
   5823 		break;
   5824 	default:
   5825 		break;
   5826 	}
   5827 
   5828 	/* Set Phy Config Counter to 50msec */
   5829 	if (sc->sc_type == WM_T_PCH2) {
   5830 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5831 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5832 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5833 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5834 	}
   5835 
   5836 	if (phy_reset != 0)
   5837 		wm_get_cfg_done(sc);
   5838 
   5839 	/* Reload EEPROM */
   5840 	switch (sc->sc_type) {
   5841 	case WM_T_82542_2_0:
   5842 	case WM_T_82542_2_1:
   5843 	case WM_T_82543:
   5844 	case WM_T_82544:
   5845 		delay(10);
   5846 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5847 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5848 		CSR_WRITE_FLUSH(sc);
   5849 		delay(2000);
   5850 		break;
   5851 	case WM_T_82540:
   5852 	case WM_T_82545:
   5853 	case WM_T_82545_3:
   5854 	case WM_T_82546:
   5855 	case WM_T_82546_3:
   5856 		delay(5*1000);
   5857 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5858 		break;
   5859 	case WM_T_82541:
   5860 	case WM_T_82541_2:
   5861 	case WM_T_82547:
   5862 	case WM_T_82547_2:
   5863 		delay(20000);
   5864 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5865 		break;
   5866 	case WM_T_82571:
   5867 	case WM_T_82572:
   5868 	case WM_T_82573:
   5869 	case WM_T_82574:
   5870 	case WM_T_82583:
   5871 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5872 			delay(10);
   5873 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5874 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5875 			CSR_WRITE_FLUSH(sc);
   5876 		}
   5877 		/* check EECD_EE_AUTORD */
   5878 		wm_get_auto_rd_done(sc);
   5879 		/*
    5880 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5881 		 * is set.
   5882 		 */
   5883 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5884 		    || (sc->sc_type == WM_T_82583))
   5885 			delay(25*1000);
   5886 		break;
   5887 	case WM_T_82575:
   5888 	case WM_T_82576:
   5889 	case WM_T_82580:
   5890 	case WM_T_I350:
   5891 	case WM_T_I354:
   5892 	case WM_T_I210:
   5893 	case WM_T_I211:
   5894 	case WM_T_80003:
   5895 		/* check EECD_EE_AUTORD */
   5896 		wm_get_auto_rd_done(sc);
   5897 		break;
   5898 	case WM_T_ICH8:
   5899 	case WM_T_ICH9:
   5900 	case WM_T_ICH10:
   5901 	case WM_T_PCH:
   5902 	case WM_T_PCH2:
   5903 	case WM_T_PCH_LPT:
   5904 	case WM_T_PCH_SPT:
   5905 	case WM_T_PCH_CNP:
   5906 		break;
   5907 	default:
   5908 		panic("%s: unknown type\n", __func__);
   5909 	}
   5910 
   5911 	/* Check whether EEPROM is present or not */
   5912 	switch (sc->sc_type) {
   5913 	case WM_T_82575:
   5914 	case WM_T_82576:
   5915 	case WM_T_82580:
   5916 	case WM_T_I350:
   5917 	case WM_T_I354:
   5918 	case WM_T_ICH8:
   5919 	case WM_T_ICH9:
   5920 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5921 			/* Not found */
   5922 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5923 			if (sc->sc_type == WM_T_82575)
   5924 				wm_reset_init_script_82575(sc);
   5925 		}
   5926 		break;
   5927 	default:
   5928 		break;
   5929 	}
   5930 
   5931 	if (phy_reset != 0)
   5932 		wm_phy_post_reset(sc);
   5933 
   5934 	if ((sc->sc_type == WM_T_82580)
   5935 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5936 		/* Clear global device reset status bit */
   5937 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5938 	}
   5939 
   5940 	/* Clear any pending interrupt events. */
   5941 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5942 	reg = CSR_READ(sc, WMREG_ICR);
   5943 	if (wm_is_using_msix(sc)) {
   5944 		if (sc->sc_type != WM_T_82574) {
   5945 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5946 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5947 		} else
   5948 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5949 	}
   5950 
   5951 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5952 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5953 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5954 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5955 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5956 		reg |= KABGTXD_BGSQLBIAS;
   5957 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5958 	}
   5959 
   5960 	/* Reload sc_ctrl */
   5961 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5962 
   5963 	wm_set_eee(sc);
   5964 
   5965 	/*
   5966 	 * For PCH, this write will make sure that any noise will be detected
   5967 	 * as a CRC error and be dropped rather than show up as a bad packet
   5968 	 * to the DMA engine
   5969 	 */
   5970 	if (sc->sc_type == WM_T_PCH)
   5971 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5972 
   5973 	if (sc->sc_type >= WM_T_82544)
   5974 		CSR_WRITE(sc, WMREG_WUC, 0);
   5975 
   5976 	if (sc->sc_type < WM_T_82575)
   5977 		wm_disable_aspm(sc); /* Workaround for some chips */
   5978 
   5979 	wm_reset_mdicnfg_82580(sc);
   5980 
   5981 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5982 		wm_pll_workaround_i210(sc);
   5983 
   5984 	if (sc->sc_type == WM_T_80003) {
   5985 		/* Default to TRUE to enable the MDIC W/A */
   5986 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5987 
   5988 		rv = wm_kmrn_readreg(sc,
   5989 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5990 		if (rv == 0) {
   5991 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5992 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5993 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5994 			else
   5995 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5996 		}
   5997 	}
   5998 }
   5999 
   6000 /*
   6001  * wm_add_rxbuf:
   6002  *
    6003  *	Add a receive buffer to the indicated descriptor.
   6004  */
   6005 static int
   6006 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   6007 {
   6008 	struct wm_softc *sc = rxq->rxq_sc;
   6009 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   6010 	struct mbuf *m;
   6011 	int error;
   6012 
   6013 	KASSERT(mutex_owned(rxq->rxq_lock));
   6014 
   6015 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   6016 	if (m == NULL)
   6017 		return ENOBUFS;
   6018 
   6019 	MCLGET(m, M_DONTWAIT);
   6020 	if ((m->m_flags & M_EXT) == 0) {
   6021 		m_freem(m);
   6022 		return ENOBUFS;
   6023 	}
   6024 
   6025 	if (rxs->rxs_mbuf != NULL)
   6026 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   6027 
   6028 	rxs->rxs_mbuf = m;
   6029 
   6030 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   6031 	/*
   6032 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   6033 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   6034 	 */
   6035 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   6036 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   6037 	if (error) {
   6038 		/* XXX XXX XXX */
   6039 		aprint_error_dev(sc->sc_dev,
   6040 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   6041 		panic("wm_add_rxbuf");
   6042 	}
   6043 
   6044 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6045 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6046 
   6047 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6048 		if ((sc->sc_rctl & RCTL_EN) != 0)
   6049 			wm_init_rxdesc(rxq, idx);
   6050 	} else
   6051 		wm_init_rxdesc(rxq, idx);
   6052 
   6053 	return 0;
   6054 }
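/*
 * Note (illustrative, not from the datasheet): on NEWQUEUE (82575 and
 * later) hardware, the descriptor is only re-initialized above once
 * RCTL_EN is set, because on those chips the RX descriptors must be
 * set up after RCTL.EN; see the RDT handling in wm_init_locked() and
 * wm_set_filter().
 */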
   6055 
   6056 /*
   6057  * wm_rxdrain:
   6058  *
   6059  *	Drain the receive queue.
   6060  */
   6061 static void
   6062 wm_rxdrain(struct wm_rxqueue *rxq)
   6063 {
   6064 	struct wm_softc *sc = rxq->rxq_sc;
   6065 	struct wm_rxsoft *rxs;
   6066 	int i;
   6067 
   6068 	KASSERT(mutex_owned(rxq->rxq_lock));
   6069 
   6070 	for (i = 0; i < WM_NRXDESC; i++) {
   6071 		rxs = &rxq->rxq_soft[i];
   6072 		if (rxs->rxs_mbuf != NULL) {
   6073 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   6074 			m_freem(rxs->rxs_mbuf);
   6075 			rxs->rxs_mbuf = NULL;
   6076 		}
   6077 	}
   6078 }
   6079 
   6080 /*
   6081  * Setup registers for RSS.
   6082  *
   6083  * XXX not yet VMDq support
   6084  */
   6085 static void
   6086 wm_init_rss(struct wm_softc *sc)
   6087 {
   6088 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   6089 	int i;
   6090 
   6091 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   6092 
   6093 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   6094 		unsigned int qid, reta_ent;
   6095 
   6096 		qid  = i % sc->sc_nqueues;
   6097 		switch (sc->sc_type) {
   6098 		case WM_T_82574:
   6099 			reta_ent = __SHIFTIN(qid,
   6100 			    RETA_ENT_QINDEX_MASK_82574);
   6101 			break;
   6102 		case WM_T_82575:
   6103 			reta_ent = __SHIFTIN(qid,
   6104 			    RETA_ENT_QINDEX1_MASK_82575);
   6105 			break;
   6106 		default:
   6107 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   6108 			break;
   6109 		}
   6110 
   6111 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   6112 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   6113 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   6114 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   6115 	}
   6116 
   6117 	rss_getkey((uint8_t *)rss_key);
   6118 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   6119 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   6120 
   6121 	if (sc->sc_type == WM_T_82574)
   6122 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   6123 	else
   6124 		mrqc = MRQC_ENABLE_RSS_MQ;
   6125 
   6126 	/*
   6127 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   6128 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   6129 	 */
   6130 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   6131 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   6132 #if 0
   6133 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   6134 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   6135 #endif
   6136 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   6137 
   6138 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   6139 }
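/*
 * Illustrative note: with the default RETA_ENT_QINDEX_MASK layout and,
 * say, sc_nqueues = 4, the loop above fills the redirection table
 * round-robin (entry 0 -> q0, 1 -> q1, 2 -> q2, 3 -> q3, 4 -> q0, ...),
 * so RSS hash values spread incoming flows evenly across the queues.
 */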
   6140 
   6141 /*
    6142  * Adjust the TX and RX queue numbers which the system actually uses.
    6143  *
    6144  * The numbers are affected by the parameters below:
    6145  *     - The number of hardware queues
   6146  *     - The number of MSI-X vectors (= "nvectors" argument)
   6147  *     - ncpu
   6148  */
   6149 static void
   6150 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   6151 {
   6152 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   6153 
   6154 	if (nvectors < 2) {
   6155 		sc->sc_nqueues = 1;
   6156 		return;
   6157 	}
   6158 
   6159 	switch (sc->sc_type) {
   6160 	case WM_T_82572:
   6161 		hw_ntxqueues = 2;
   6162 		hw_nrxqueues = 2;
   6163 		break;
   6164 	case WM_T_82574:
   6165 		hw_ntxqueues = 2;
   6166 		hw_nrxqueues = 2;
   6167 		break;
   6168 	case WM_T_82575:
   6169 		hw_ntxqueues = 4;
   6170 		hw_nrxqueues = 4;
   6171 		break;
   6172 	case WM_T_82576:
   6173 		hw_ntxqueues = 16;
   6174 		hw_nrxqueues = 16;
   6175 		break;
   6176 	case WM_T_82580:
   6177 	case WM_T_I350:
   6178 	case WM_T_I354:
   6179 		hw_ntxqueues = 8;
   6180 		hw_nrxqueues = 8;
   6181 		break;
   6182 	case WM_T_I210:
   6183 		hw_ntxqueues = 4;
   6184 		hw_nrxqueues = 4;
   6185 		break;
   6186 	case WM_T_I211:
   6187 		hw_ntxqueues = 2;
   6188 		hw_nrxqueues = 2;
   6189 		break;
   6190 		/*
   6191 		 * The below Ethernet controllers do not support MSI-X;
   6192 		 * this driver doesn't let them use multiqueue.
   6193 		 *     - WM_T_80003
   6194 		 *     - WM_T_ICH8
   6195 		 *     - WM_T_ICH9
   6196 		 *     - WM_T_ICH10
   6197 		 *     - WM_T_PCH
   6198 		 *     - WM_T_PCH2
   6199 		 *     - WM_T_PCH_LPT
   6200 		 */
   6201 	default:
   6202 		hw_ntxqueues = 1;
   6203 		hw_nrxqueues = 1;
   6204 		break;
   6205 	}
   6206 
   6207 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   6208 
   6209 	/*
    6210 	 * Since using more queues than MSI-X vectors cannot improve
    6211 	 * scaling, we limit the number of queues actually used.
   6212 	 */
   6213 	if (nvectors < hw_nqueues + 1)
   6214 		sc->sc_nqueues = nvectors - 1;
   6215 	else
   6216 		sc->sc_nqueues = hw_nqueues;
   6217 
   6218 	/*
    6219 	 * Since using more queues than CPUs cannot improve scaling, we
    6220 	 * limit the number of queues actually used.
   6221 	 */
   6222 	if (ncpu < sc->sc_nqueues)
   6223 		sc->sc_nqueues = ncpu;
   6224 }
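/*
 * Worked example (illustrative): an 82576 advertises 16 hardware
 * queues. With nvectors = 5 MSI-X vectors, one vector is reserved for
 * the link interrupt, so sc_nqueues = 5 - 1 = 4; on a 2-CPU machine
 * the ncpu clamp above would further reduce sc_nqueues to 2.
 */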
   6225 
   6226 static inline bool
   6227 wm_is_using_msix(struct wm_softc *sc)
   6228 {
   6229 
   6230 	return (sc->sc_nintrs > 1);
   6231 }
   6232 
   6233 static inline bool
   6234 wm_is_using_multiqueue(struct wm_softc *sc)
   6235 {
   6236 
   6237 	return (sc->sc_nqueues > 1);
   6238 }
   6239 
   6240 static int
   6241 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6242 {
   6243 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6244 
   6245 	wmq->wmq_id = qidx;
   6246 	wmq->wmq_intr_idx = intr_idx;
   6247 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
   6248 	    wm_handle_queue, wmq);
   6249 	if (wmq->wmq_si != NULL)
   6250 		return 0;
   6251 
   6252 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6253 	    wmq->wmq_id);
   6254 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6255 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6256 	return ENOMEM;
   6257 }
   6258 
   6259 /*
    6260  * Both single-interrupt MSI and INTx can use this function.
   6261  */
   6262 static int
   6263 wm_setup_legacy(struct wm_softc *sc)
   6264 {
   6265 	pci_chipset_tag_t pc = sc->sc_pc;
   6266 	const char *intrstr = NULL;
   6267 	char intrbuf[PCI_INTRSTR_LEN];
   6268 	int error;
   6269 
   6270 	error = wm_alloc_txrx_queues(sc);
   6271 	if (error) {
   6272 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6273 		    error);
   6274 		return ENOMEM;
   6275 	}
   6276 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6277 	    sizeof(intrbuf));
   6278 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6279 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6280 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6281 	if (sc->sc_ihs[0] == NULL) {
   6282 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   6283 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6284 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6285 		return ENOMEM;
   6286 	}
   6287 
   6288 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6289 	sc->sc_nintrs = 1;
   6290 
   6291 	return wm_softint_establish_queue(sc, 0, 0);
   6292 }
   6293 
   6294 static int
   6295 wm_setup_msix(struct wm_softc *sc)
   6296 {
   6297 	void *vih;
   6298 	kcpuset_t *affinity;
   6299 	int qidx, error, intr_idx, txrx_established;
   6300 	pci_chipset_tag_t pc = sc->sc_pc;
   6301 	const char *intrstr = NULL;
   6302 	char intrbuf[PCI_INTRSTR_LEN];
   6303 	char intr_xname[INTRDEVNAMEBUF];
   6304 
   6305 	if (sc->sc_nqueues < ncpu) {
   6306 		/*
    6307 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    6308 		 * interrupts starts at CPU#1.
   6309 		 */
   6310 		sc->sc_affinity_offset = 1;
   6311 	} else {
   6312 		/*
    6313 		 * In this case, this device uses all CPUs, so we unify the
    6314 		 * affinity cpu_index with the MSI-X vector number for readability.
   6315 		 */
   6316 		sc->sc_affinity_offset = 0;
   6317 	}
   6318 
   6319 	error = wm_alloc_txrx_queues(sc);
   6320 	if (error) {
   6321 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6322 		    error);
   6323 		return ENOMEM;
   6324 	}
   6325 
   6326 	kcpuset_create(&affinity, false);
   6327 	intr_idx = 0;
   6328 
   6329 	/*
   6330 	 * TX and RX
   6331 	 */
   6332 	txrx_established = 0;
   6333 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6334 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6335 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6336 
   6337 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6338 		    sizeof(intrbuf));
   6339 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6340 		    PCI_INTR_MPSAFE, true);
   6341 		memset(intr_xname, 0, sizeof(intr_xname));
   6342 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6343 		    device_xname(sc->sc_dev), qidx);
   6344 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6345 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6346 		if (vih == NULL) {
   6347 			aprint_error_dev(sc->sc_dev,
   6348 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6349 			    intrstr ? " at " : "",
   6350 			    intrstr ? intrstr : "");
   6351 
   6352 			goto fail;
   6353 		}
   6354 		kcpuset_zero(affinity);
   6355 		/* Round-robin affinity */
   6356 		kcpuset_set(affinity, affinity_to);
   6357 		error = interrupt_distribute(vih, affinity, NULL);
   6358 		if (error == 0) {
   6359 			aprint_normal_dev(sc->sc_dev,
   6360 			    "for TX and RX interrupting at %s affinity to %u\n",
   6361 			    intrstr, affinity_to);
   6362 		} else {
   6363 			aprint_normal_dev(sc->sc_dev,
   6364 			    "for TX and RX interrupting at %s\n", intrstr);
   6365 		}
   6366 		sc->sc_ihs[intr_idx] = vih;
   6367 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6368 			goto fail;
   6369 		txrx_established++;
   6370 		intr_idx++;
   6371 	}
   6372 
   6373 	/* LINK */
   6374 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6375 	    sizeof(intrbuf));
   6376 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6377 	memset(intr_xname, 0, sizeof(intr_xname));
   6378 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6379 	    device_xname(sc->sc_dev));
   6380 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6381 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6382 	if (vih == NULL) {
   6383 		aprint_error_dev(sc->sc_dev,
   6384 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6385 		    intrstr ? " at " : "",
   6386 		    intrstr ? intrstr : "");
   6387 
   6388 		goto fail;
   6389 	}
   6390 	/* Keep default affinity to LINK interrupt */
   6391 	aprint_normal_dev(sc->sc_dev,
   6392 	    "for LINK interrupting at %s\n", intrstr);
   6393 	sc->sc_ihs[intr_idx] = vih;
   6394 	sc->sc_link_intr_idx = intr_idx;
   6395 
   6396 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6397 	kcpuset_destroy(affinity);
   6398 	return 0;
   6399 
   6400 fail:
   6401 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6402 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6403 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6404 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6405 	}
   6406 
   6407 	kcpuset_destroy(affinity);
   6408 	return ENOMEM;
   6409 }
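/*
 * Resulting vector layout (illustrative, for sc_nqueues = 4): vectors
 * 0..3 are the per-queue TXRX interrupts, bound round-robin to CPUs
 * starting at sc_affinity_offset, and vector 4 is the LINK interrupt,
 * which keeps its default affinity.
 */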
   6410 
   6411 static void
   6412 wm_unset_stopping_flags(struct wm_softc *sc)
   6413 {
   6414 	int i;
   6415 
   6416 	KASSERT(mutex_owned(sc->sc_core_lock));
   6417 
   6418 	/* Must unset stopping flags in ascending order. */
   6419 	for (i = 0; i < sc->sc_nqueues; i++) {
   6420 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6421 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6422 
   6423 		mutex_enter(txq->txq_lock);
   6424 		txq->txq_stopping = false;
   6425 		mutex_exit(txq->txq_lock);
   6426 
   6427 		mutex_enter(rxq->rxq_lock);
   6428 		rxq->rxq_stopping = false;
   6429 		mutex_exit(rxq->rxq_lock);
   6430 	}
   6431 
   6432 	sc->sc_core_stopping = false;
   6433 }
   6434 
   6435 static void
   6436 wm_set_stopping_flags(struct wm_softc *sc)
   6437 {
   6438 	int i;
   6439 
   6440 	KASSERT(mutex_owned(sc->sc_core_lock));
   6441 
   6442 	sc->sc_core_stopping = true;
   6443 
   6444 	/* Must set stopping flags in ascending order. */
   6445 	for (i = 0; i < sc->sc_nqueues; i++) {
   6446 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6447 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6448 
   6449 		mutex_enter(rxq->rxq_lock);
   6450 		rxq->rxq_stopping = true;
   6451 		mutex_exit(rxq->rxq_lock);
   6452 
   6453 		mutex_enter(txq->txq_lock);
   6454 		txq->txq_stopping = true;
   6455 		mutex_exit(txq->txq_lock);
   6456 	}
   6457 }
   6458 
   6459 /*
   6460  * Write interrupt interval value to ITR or EITR
   6461  */
   6462 static void
   6463 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6464 {
   6465 
   6466 	if (!wmq->wmq_set_itr)
   6467 		return;
   6468 
   6469 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6470 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6471 
   6472 		/*
    6473 		 * The 82575 doesn't have the CNT_INGR field,
    6474 		 * so overwrite the counter field in software.
   6475 		 */
   6476 		if (sc->sc_type == WM_T_82575)
   6477 			eitr |= __SHIFTIN(wmq->wmq_itr,
   6478 			    EITR_COUNTER_MASK_82575);
   6479 		else
   6480 			eitr |= EITR_CNT_INGR;
   6481 
   6482 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6483 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6484 		/*
    6485 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6486 		 * the multiqueue function with MSI-X.
   6487 		 */
   6488 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6489 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6490 	} else {
   6491 		KASSERT(wmq->wmq_id == 0);
   6492 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6493 	}
   6494 
   6495 	wmq->wmq_set_itr = false;
   6496 }
   6497 
   6498 /*
   6499  * TODO
    6500  * The dynamic itr calculation below is almost the same as Linux igb's;
    6501  * however, it does not fit wm(4), so AIM stays disabled until we find
    6502  * an appropriate itr calculation.
   6503  */
   6504 /*
    6505  * Calculate the interrupt interval value to be written to the register
    6506  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   6507  */
   6508 static void
   6509 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6510 {
   6511 #ifdef NOTYET
   6512 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6513 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6514 	uint32_t avg_size = 0;
   6515 	uint32_t new_itr;
   6516 
   6517 	if (rxq->rxq_packets)
   6518 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6519 	if (txq->txq_packets)
   6520 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6521 
   6522 	if (avg_size == 0) {
   6523 		new_itr = 450; /* restore default value */
   6524 		goto out;
   6525 	}
   6526 
   6527 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6528 	avg_size += 24;
   6529 
   6530 	/* Don't starve jumbo frames */
   6531 	avg_size = uimin(avg_size, 3000);
   6532 
   6533 	/* Give a little boost to mid-size frames */
   6534 	if ((avg_size > 300) && (avg_size < 1200))
   6535 		new_itr = avg_size / 3;
   6536 	else
   6537 		new_itr = avg_size / 2;
   6538 
   6539 out:
   6540 	/*
    6541 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6542 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6543 	 */
   6544 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6545 		new_itr *= 4;
   6546 
   6547 	if (new_itr != wmq->wmq_itr) {
   6548 		wmq->wmq_itr = new_itr;
   6549 		wmq->wmq_set_itr = true;
   6550 	} else
   6551 		wmq->wmq_set_itr = false;
   6552 
   6553 	rxq->rxq_packets = 0;
   6554 	rxq->rxq_bytes = 0;
   6555 	txq->txq_packets = 0;
   6556 	txq->txq_bytes = 0;
   6557 #endif
   6558 }
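/*
 * Worked example for the disabled (NOTYET) path above: an average
 * packet of 576 bytes gives avg_size = 576 + 24 = 600, which is
 * mid-size, so new_itr = 600 / 3 = 200; on everything except the
 * 82575 this is then scaled by 4 to 800.
 */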
   6559 
   6560 static void
   6561 wm_init_sysctls(struct wm_softc *sc)
   6562 {
   6563 	struct sysctllog **log;
   6564 	const struct sysctlnode *rnode, *qnode, *cnode;
   6565 	int i, rv;
   6566 	const char *dvname;
   6567 
   6568 	log = &sc->sc_sysctllog;
   6569 	dvname = device_xname(sc->sc_dev);
   6570 
   6571 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6572 	    0, CTLTYPE_NODE, dvname,
   6573 	    SYSCTL_DESCR("wm information and settings"),
   6574 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6575 	if (rv != 0)
   6576 		goto err;
   6577 
   6578 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6579 	    CTLTYPE_BOOL, "txrx_workqueue",
   6580 	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6581 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6582 	if (rv != 0)
   6583 		goto teardown;
   6584 
   6585 	for (i = 0; i < sc->sc_nqueues; i++) {
   6586 		struct wm_queue *wmq = &sc->sc_queue[i];
   6587 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6588 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6589 
   6590 		snprintf(sc->sc_queue[i].sysctlname,
   6591 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6592 
   6593 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6594 		    0, CTLTYPE_NODE,
   6595 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6596 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6597 			break;
   6598 
   6599 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6600 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6601 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6602 		    NULL, 0, &txq->txq_free,
   6603 		    0, CTL_CREATE, CTL_EOL) != 0)
   6604 			break;
   6605 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6606 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6607 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6608 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6609 		    0, CTL_CREATE, CTL_EOL) != 0)
   6610 			break;
   6611 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6612 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6613 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6614 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6615 		    0, CTL_CREATE, CTL_EOL) != 0)
   6616 			break;
   6617 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6618 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6619 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6620 		    NULL, 0, &txq->txq_next,
   6621 		    0, CTL_CREATE, CTL_EOL) != 0)
   6622 			break;
   6623 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6624 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6625 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6626 		    NULL, 0, &txq->txq_sfree,
   6627 		    0, CTL_CREATE, CTL_EOL) != 0)
   6628 			break;
   6629 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6630 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6631 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6632 		    NULL, 0, &txq->txq_snext,
   6633 		    0, CTL_CREATE, CTL_EOL) != 0)
   6634 			break;
   6635 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6636 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6637 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6638 		    NULL, 0, &txq->txq_sdirty,
   6639 		    0, CTL_CREATE, CTL_EOL) != 0)
   6640 			break;
   6641 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6642 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6643 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6644 		    NULL, 0, &txq->txq_flags,
   6645 		    0, CTL_CREATE, CTL_EOL) != 0)
   6646 			break;
   6647 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6648 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6649 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6650 		    NULL, 0, &txq->txq_stopping,
   6651 		    0, CTL_CREATE, CTL_EOL) != 0)
   6652 			break;
   6653 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6654 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6655 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6656 		    NULL, 0, &txq->txq_sending,
   6657 		    0, CTL_CREATE, CTL_EOL) != 0)
   6658 			break;
   6659 
   6660 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6661 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6662 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6663 		    NULL, 0, &rxq->rxq_ptr,
   6664 		    0, CTL_CREATE, CTL_EOL) != 0)
   6665 			break;
   6666 	}
   6667 
   6668 #ifdef WM_DEBUG
   6669 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6670 	    CTLTYPE_INT, "debug_flags",
   6671 	    SYSCTL_DESCR(
   6672 		    "Debug flags:\n"	\
   6673 		    "\t0x01 LINK\n"	\
   6674 		    "\t0x02 TX\n"	\
   6675 		    "\t0x04 RX\n"	\
   6676 		    "\t0x08 GMII\n"	\
   6677 		    "\t0x10 MANAGE\n"	\
   6678 		    "\t0x20 NVM\n"	\
   6679 		    "\t0x40 INIT\n"	\
   6680 		    "\t0x80 LOCK"),
   6681 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6682 	if (rv != 0)
   6683 		goto teardown;
   6684 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6685 	    CTLTYPE_BOOL, "trigger_reset",
   6686 	    SYSCTL_DESCR("Trigger an interface reset"),
   6687 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
   6688 	if (rv != 0)
   6689 		goto teardown;
   6690 #endif
   6691 
   6692 	return;
   6693 
   6694 teardown:
   6695 	sysctl_teardown(log);
   6696 err:
   6697 	sc->sc_sysctllog = NULL;
   6698 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6699 	    __func__, rv);
   6700 }
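/*
 * Usage sketch (assuming the interface is wm0): the nodes created
 * above live under hw.wm0, e.g.:
 *	sysctl hw.wm0.txrx_workqueue	# packet-processing workqueue knob
 *	sysctl hw.wm0.q0.txq_free	# per-queue TX ring state
 */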
   6701 
   6702 /*
   6703  * wm_init:		[ifnet interface function]
   6704  *
   6705  *	Initialize the interface.
   6706  */
   6707 static int
   6708 wm_init(struct ifnet *ifp)
   6709 {
   6710 	struct wm_softc *sc = ifp->if_softc;
   6711 	int ret;
   6712 
   6713 	KASSERT(IFNET_LOCKED(ifp));
   6714 
   6715 	if (sc->sc_dying)
   6716 		return ENXIO;
   6717 
   6718 	mutex_enter(sc->sc_core_lock);
   6719 	ret = wm_init_locked(ifp);
   6720 	mutex_exit(sc->sc_core_lock);
   6721 
   6722 	return ret;
   6723 }
   6724 
   6725 static int
   6726 wm_init_locked(struct ifnet *ifp)
   6727 {
   6728 	struct wm_softc *sc = ifp->if_softc;
   6729 	struct ethercom *ec = &sc->sc_ethercom;
   6730 	int i, j, trynum, error = 0;
   6731 	uint32_t reg, sfp_mask = 0;
   6732 
   6733 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6734 		device_xname(sc->sc_dev), __func__));
   6735 	KASSERT(IFNET_LOCKED(ifp));
   6736 	KASSERT(mutex_owned(sc->sc_core_lock));
   6737 
   6738 	/*
    6739 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6740 	 * There is a small but measurable benefit to avoiding the adjustment
   6741 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6742 	 * on such platforms.  One possibility is that the DMA itself is
   6743 	 * slightly more efficient if the front of the entire packet (instead
   6744 	 * of the front of the headers) is aligned.
   6745 	 *
   6746 	 * Note we must always set align_tweak to 0 if we are using
   6747 	 * jumbo frames.
   6748 	 */
   6749 #ifdef __NO_STRICT_ALIGNMENT
   6750 	sc->sc_align_tweak = 0;
   6751 #else
   6752 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6753 		sc->sc_align_tweak = 0;
   6754 	else
   6755 		sc->sc_align_tweak = 2;
   6756 #endif /* __NO_STRICT_ALIGNMENT */
   6757 
   6758 	/* Cancel any pending I/O. */
   6759 	wm_stop_locked(ifp, false, false);
   6760 
   6761 	/* Update statistics before reset */
   6762 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6763 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6764 
   6765 	/* >= PCH_SPT hardware workaround before reset. */
   6766 	if (sc->sc_type >= WM_T_PCH_SPT)
   6767 		wm_flush_desc_rings(sc);
   6768 
   6769 	/* Reset the chip to a known state. */
   6770 	wm_reset(sc);
   6771 
   6772 	/*
   6773 	 * AMT based hardware can now take control from firmware
   6774 	 * Do this after reset.
   6775 	 */
   6776 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6777 		wm_get_hw_control(sc);
   6778 
   6779 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6780 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6781 		wm_legacy_irq_quirk_spt(sc);
   6782 
   6783 	/* Init hardware bits */
   6784 	wm_initialize_hardware_bits(sc);
   6785 
   6786 	/* Reset the PHY. */
   6787 	if (sc->sc_flags & WM_F_HAS_MII)
   6788 		wm_gmii_reset(sc);
   6789 
   6790 	if (sc->sc_type >= WM_T_ICH8) {
   6791 		reg = CSR_READ(sc, WMREG_GCR);
   6792 		/*
   6793 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6794 		 * default after reset.
   6795 		 */
   6796 		if (sc->sc_type == WM_T_ICH8)
   6797 			reg |= GCR_NO_SNOOP_ALL;
   6798 		else
   6799 			reg &= ~GCR_NO_SNOOP_ALL;
   6800 		CSR_WRITE(sc, WMREG_GCR, reg);
   6801 	}
   6802 
   6803 	if ((sc->sc_type >= WM_T_ICH8)
   6804 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6805 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6806 
   6807 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6808 		reg |= CTRL_EXT_RO_DIS;
   6809 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6810 	}
   6811 
   6812 	/* Calculate (E)ITR value */
   6813 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6814 		/*
   6815 		 * For NEWQUEUE's EITR (except for 82575).
    6816 		 * 82575's EITR should be set to the same throttling value as other
   6817 		 * old controllers' ITR because the interrupt/sec calculation
   6818 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6819 		 *
    6820 		 * 82574's EITR should be set to the same throttling value as ITR.
   6821 		 *
   6822 		 * For N interrupts/sec, set this value to:
   6823 		 * 1,000,000 / N in contrast to ITR throttling value.
   6824 		 */
   6825 		sc->sc_itr_init = 450;
   6826 	} else if (sc->sc_type >= WM_T_82543) {
   6827 		/*
   6828 		 * Set up the interrupt throttling register (units of 256ns)
   6829 		 * Note that a footnote in Intel's documentation says this
   6830 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6831 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6832 		 * that that is also true for the 1024ns units of the other
   6833 		 * interrupt-related timer registers -- so, really, we ought
   6834 		 * to divide this value by 4 when the link speed is low.
   6835 		 *
   6836 		 * XXX implement this division at link speed change!
   6837 		 */
   6838 
   6839 		/*
   6840 		 * For N interrupts/sec, set this value to:
   6841 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6842 		 * absolute and packet timer values to this value
   6843 		 * divided by 4 to get "simple timer" behavior.
   6844 		 */
   6845 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6846 	}
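	/*
	 * Worked arithmetic (illustrative): sc_itr_init = 1500 in 256ns
	 * units yields 1,000,000,000 / (1500 * 256) ~= 2604 ints/sec as
	 * noted above; the NEWQUEUE EITR value of 450 yields
	 * 1,000,000 / 450 ~= 2222 ints/sec.
	 */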
   6847 
   6848 	error = wm_init_txrx_queues(sc);
   6849 	if (error)
   6850 		goto out;
   6851 
   6852 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6853 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6854 	    (sc->sc_type >= WM_T_82575))
   6855 		wm_serdes_power_up_link_82575(sc);
   6856 
   6857 	/* Clear out the VLAN table -- we don't use it (yet). */
   6858 	CSR_WRITE(sc, WMREG_VET, 0);
   6859 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6860 		trynum = 10; /* Due to hw errata */
   6861 	else
   6862 		trynum = 1;
   6863 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6864 		for (j = 0; j < trynum; j++)
   6865 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6866 
   6867 	/*
   6868 	 * Set up flow-control parameters.
   6869 	 *
   6870 	 * XXX Values could probably stand some tuning.
   6871 	 */
   6872 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6873 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6874 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6875 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6876 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6877 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6878 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6879 	}
   6880 
   6881 	sc->sc_fcrtl = FCRTL_DFLT;
   6882 	if (sc->sc_type < WM_T_82543) {
   6883 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6884 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6885 	} else {
   6886 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6887 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6888 	}
   6889 
   6890 	if (sc->sc_type == WM_T_80003)
   6891 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6892 	else
   6893 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6894 
   6895 	/* Writes the control register. */
   6896 	wm_set_vlan(sc);
   6897 
   6898 	if (sc->sc_flags & WM_F_HAS_MII) {
   6899 		uint16_t kmreg;
   6900 
   6901 		switch (sc->sc_type) {
   6902 		case WM_T_80003:
   6903 		case WM_T_ICH8:
   6904 		case WM_T_ICH9:
   6905 		case WM_T_ICH10:
   6906 		case WM_T_PCH:
   6907 		case WM_T_PCH2:
   6908 		case WM_T_PCH_LPT:
   6909 		case WM_T_PCH_SPT:
   6910 		case WM_T_PCH_CNP:
   6911 			/*
    6912 			 * Set the MAC to wait the maximum time between each
    6913 			 * iteration and increase the max iterations when
    6914 			 * polling the PHY; this fixes erroneous timeouts at
    6915 			 * 10Mbps.
   6916 			 */
   6917 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6918 			    0xFFFF);
   6919 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6920 			    &kmreg);
   6921 			kmreg |= 0x3F;
   6922 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6923 			    kmreg);
   6924 			break;
   6925 		default:
   6926 			break;
   6927 		}
   6928 
   6929 		if (sc->sc_type == WM_T_80003) {
   6930 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6931 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6932 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6933 
   6934 			/* Bypass RX and TX FIFOs */
   6935 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6936 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6937 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6938 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6939 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6940 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6941 		}
   6942 	}
   6943 #if 0
   6944 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6945 #endif
   6946 
   6947 	/* Set up checksum offload parameters. */
   6948 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6949 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6950 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6951 		reg |= RXCSUM_IPOFL;
   6952 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6953 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6954 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6955 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6956 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6957 
   6958 	/* Set registers about MSI-X */
   6959 	if (wm_is_using_msix(sc)) {
   6960 		uint32_t ivar, qintr_idx;
   6961 		struct wm_queue *wmq;
   6962 		unsigned int qid;
   6963 
   6964 		if (sc->sc_type == WM_T_82575) {
   6965 			/* Interrupt control */
   6966 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6967 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6968 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6969 
   6970 			/* TX and RX */
   6971 			for (i = 0; i < sc->sc_nqueues; i++) {
   6972 				wmq = &sc->sc_queue[i];
   6973 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6974 				    EITR_TX_QUEUE(wmq->wmq_id)
   6975 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6976 			}
   6977 			/* Link status */
   6978 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6979 			    EITR_OTHER);
   6980 		} else if (sc->sc_type == WM_T_82574) {
   6981 			/* Interrupt control */
   6982 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6983 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6984 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6985 
   6986 			/*
   6987 			 * Work around issue with spurious interrupts
   6988 			 * in MSI-X mode.
    6989 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    6990 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   6991 			 */
   6992 			reg = CSR_READ(sc, WMREG_RFCTL);
   6993 			reg |= WMREG_RFCTL_ACKDIS;
   6994 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6995 
   6996 			ivar = 0;
   6997 			/* TX and RX */
   6998 			for (i = 0; i < sc->sc_nqueues; i++) {
   6999 				wmq = &sc->sc_queue[i];
   7000 				qid = wmq->wmq_id;
   7001 				qintr_idx = wmq->wmq_intr_idx;
   7002 
   7003 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7004 				    IVAR_TX_MASK_Q_82574(qid));
   7005 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   7006 				    IVAR_RX_MASK_Q_82574(qid));
   7007 			}
   7008 			/* Link status */
   7009 			ivar |= __SHIFTIN((IVAR_VALID_82574
   7010 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   7011 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   7012 		} else {
   7013 			/* Interrupt control */
   7014 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   7015 			    | GPIE_EIAME | GPIE_PBA);
   7016 
   7017 			switch (sc->sc_type) {
   7018 			case WM_T_82580:
   7019 			case WM_T_I350:
   7020 			case WM_T_I354:
   7021 			case WM_T_I210:
   7022 			case WM_T_I211:
   7023 				/* TX and RX */
   7024 				for (i = 0; i < sc->sc_nqueues; i++) {
   7025 					wmq = &sc->sc_queue[i];
   7026 					qid = wmq->wmq_id;
   7027 					qintr_idx = wmq->wmq_intr_idx;
   7028 
   7029 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   7030 					ivar &= ~IVAR_TX_MASK_Q(qid);
   7031 					ivar |= __SHIFTIN((qintr_idx
   7032 						| IVAR_VALID),
   7033 					    IVAR_TX_MASK_Q(qid));
   7034 					ivar &= ~IVAR_RX_MASK_Q(qid);
   7035 					ivar |= __SHIFTIN((qintr_idx
   7036 						| IVAR_VALID),
   7037 					    IVAR_RX_MASK_Q(qid));
   7038 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   7039 				}
   7040 				break;
   7041 			case WM_T_82576:
   7042 				/* TX and RX */
   7043 				for (i = 0; i < sc->sc_nqueues; i++) {
   7044 					wmq = &sc->sc_queue[i];
   7045 					qid = wmq->wmq_id;
   7046 					qintr_idx = wmq->wmq_intr_idx;
   7047 
   7048 					ivar = CSR_READ(sc,
   7049 					    WMREG_IVAR_Q_82576(qid));
   7050 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   7051 					ivar |= __SHIFTIN((qintr_idx
   7052 						| IVAR_VALID),
   7053 					    IVAR_TX_MASK_Q_82576(qid));
   7054 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   7055 					ivar |= __SHIFTIN((qintr_idx
   7056 						| IVAR_VALID),
   7057 					    IVAR_RX_MASK_Q_82576(qid));
   7058 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   7059 					    ivar);
   7060 				}
   7061 				break;
   7062 			default:
   7063 				break;
   7064 			}
   7065 
   7066 			/* Link status */
   7067 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   7068 			    IVAR_MISC_OTHER);
   7069 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   7070 		}
   7071 
   7072 		if (wm_is_using_multiqueue(sc)) {
   7073 			wm_init_rss(sc);
   7074 
    7075 			/*
    7076 			 * NOTE: Receive Full-Packet Checksum Offload
    7077 			 * is mutually exclusive with Multiqueue. However,
    7078 			 * this is not the same as the TCP/IP checksums,
    7079 			 * which still work.
    7080 			 */
   7081 			reg = CSR_READ(sc, WMREG_RXCSUM);
   7082 			reg |= RXCSUM_PCSD;
   7083 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   7084 		}
   7085 	}
   7086 
   7087 	/* Set up the interrupt registers. */
   7088 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7089 
   7090 	/* Enable SFP module insertion interrupt if it's required */
   7091 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   7092 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   7093 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7094 		sfp_mask = ICR_GPI(0);
   7095 	}
   7096 
   7097 	if (wm_is_using_msix(sc)) {
   7098 		uint32_t mask;
   7099 		struct wm_queue *wmq;
   7100 
   7101 		switch (sc->sc_type) {
   7102 		case WM_T_82574:
   7103 			mask = 0;
   7104 			for (i = 0; i < sc->sc_nqueues; i++) {
   7105 				wmq = &sc->sc_queue[i];
   7106 				mask |= ICR_TXQ(wmq->wmq_id);
   7107 				mask |= ICR_RXQ(wmq->wmq_id);
   7108 			}
   7109 			mask |= ICR_OTHER;
   7110 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   7111 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   7112 			break;
   7113 		default:
   7114 			if (sc->sc_type == WM_T_82575) {
   7115 				mask = 0;
   7116 				for (i = 0; i < sc->sc_nqueues; i++) {
   7117 					wmq = &sc->sc_queue[i];
   7118 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   7119 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   7120 				}
   7121 				mask |= EITR_OTHER;
   7122 			} else {
   7123 				mask = 0;
   7124 				for (i = 0; i < sc->sc_nqueues; i++) {
   7125 					wmq = &sc->sc_queue[i];
   7126 					mask |= 1 << wmq->wmq_intr_idx;
   7127 				}
   7128 				mask |= 1 << sc->sc_link_intr_idx;
   7129 			}
   7130 			CSR_WRITE(sc, WMREG_EIAC, mask);
   7131 			CSR_WRITE(sc, WMREG_EIAM, mask);
   7132 			CSR_WRITE(sc, WMREG_EIMS, mask);
   7133 
   7134 			/* For other interrupts */
   7135 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   7136 			break;
   7137 		}
   7138 	} else {
   7139 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   7140 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   7141 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   7142 	}
   7143 
   7144 	/* Set up the inter-packet gap. */
   7145 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   7146 
   7147 	if (sc->sc_type >= WM_T_82543) {
   7148 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7149 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   7150 			wm_itrs_writereg(sc, wmq);
   7151 		}
   7152 		/*
    7153 		 * Link interrupts occur much less often than TX
    7154 		 * and RX interrupts, so we don't tune the
    7155 		 * EITR(WM_MSIX_LINKINTR_IDX) value as
    7156 		 * FreeBSD's if_igb does.
   7157 		 */
   7158 	}
   7159 
   7160 	/* Set the VLAN EtherType. */
   7161 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   7162 
   7163 	/*
   7164 	 * Set up the transmit control register; we start out with
   7165 	 * a collision distance suitable for FDX, but update it when
   7166 	 * we resolve the media type.
   7167 	 */
   7168 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   7169 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   7170 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7171 	if (sc->sc_type >= WM_T_82571)
   7172 		sc->sc_tctl |= TCTL_MULR;
   7173 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7174 
   7175 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7176 		/* Write TDT after TCTL.EN is set. See the documentation. */
   7177 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   7178 	}
   7179 
   7180 	if (sc->sc_type == WM_T_80003) {
   7181 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   7182 		reg &= ~TCTL_EXT_GCEX_MASK;
   7183 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   7184 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   7185 	}
   7186 
   7187 	/* Set the media. */
   7188 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   7189 		goto out;
   7190 
   7191 	/* Configure for OS presence */
   7192 	wm_init_manageability(sc);
   7193 
   7194 	/*
   7195 	 * Set up the receive control register; we actually program the
   7196 	 * register when we set the receive filter. Use multicast address
   7197 	 * offset type 0.
   7198 	 *
   7199 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   7200 	 * don't enable that feature.
   7201 	 */
   7202 	sc->sc_mchash_type = 0;
   7203 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   7204 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   7205 
   7206 	/* 82574 use one buffer extended Rx descriptor. */
   7207 	if (sc->sc_type == WM_T_82574)
   7208 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   7209 
   7210 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   7211 		sc->sc_rctl |= RCTL_SECRC;
   7212 
   7213 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   7214 	    && (ifp->if_mtu > ETHERMTU)) {
   7215 		sc->sc_rctl |= RCTL_LPE;
   7216 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7217 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   7218 	}
   7219 
   7220 	if (MCLBYTES == 2048)
   7221 		sc->sc_rctl |= RCTL_2k;
   7222 	else {
   7223 		if (sc->sc_type >= WM_T_82543) {
   7224 			switch (MCLBYTES) {
   7225 			case 4096:
   7226 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7227 				break;
   7228 			case 8192:
   7229 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7230 				break;
   7231 			case 16384:
   7232 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7233 				break;
   7234 			default:
   7235 				panic("wm_init: MCLBYTES %d unsupported",
   7236 				    MCLBYTES);
   7237 				break;
   7238 			}
   7239 		} else
   7240 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7241 	}
   7242 
   7243 	/* Enable ECC */
   7244 	switch (sc->sc_type) {
   7245 	case WM_T_82571:
   7246 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7247 		reg |= PBA_ECC_CORR_EN;
   7248 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7249 		break;
   7250 	case WM_T_PCH_LPT:
   7251 	case WM_T_PCH_SPT:
   7252 	case WM_T_PCH_CNP:
   7253 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7254 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7255 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7256 
   7257 		sc->sc_ctrl |= CTRL_MEHE;
   7258 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7259 		break;
   7260 	default:
   7261 		break;
   7262 	}
   7263 
   7264 	/*
   7265 	 * Set the receive filter.
   7266 	 *
   7267 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7268 	 * the setting of RCTL.EN in wm_set_filter()
   7269 	 */
   7270 	wm_set_filter(sc);
   7271 
   7272 	/* On 575 and later set RDT only if RX enabled */
   7273 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7274 		int qidx;
   7275 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7276 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7277 			for (i = 0; i < WM_NRXDESC; i++) {
   7278 				mutex_enter(rxq->rxq_lock);
   7279 				wm_init_rxdesc(rxq, i);
   7280 				mutex_exit(rxq->rxq_lock);
   7281 
   7282 			}
   7283 		}
   7284 	}
   7285 
   7286 	wm_unset_stopping_flags(sc);
   7287 
   7288 	/* Start the one second link check clock. */
   7289 	callout_schedule(&sc->sc_tick_ch, hz);
   7290 
   7291 	/*
   7292 	 * ...all done! (IFNET_LOCKED asserted above.)
   7293 	 */
   7294 	ifp->if_flags |= IFF_RUNNING;
   7295 
   7296 out:
   7297 	/* Save last flags for the callback */
   7298 	sc->sc_if_flags = ifp->if_flags;
   7299 	sc->sc_ec_capenable = ec->ec_capenable;
   7300 	if (error)
   7301 		log(LOG_ERR, "%s: interface not running\n",
   7302 		    device_xname(sc->sc_dev));
   7303 	return error;
   7304 }
   7305 
   7306 /*
   7307  * wm_stop:		[ifnet interface function]
   7308  *
   7309  *	Stop transmission on the interface.
   7310  */
   7311 static void
   7312 wm_stop(struct ifnet *ifp, int disable)
   7313 {
   7314 	struct wm_softc *sc = ifp->if_softc;
   7315 
   7316 	ASSERT_SLEEPABLE();
   7317 	KASSERT(IFNET_LOCKED(ifp));
   7318 
   7319 	mutex_enter(sc->sc_core_lock);
   7320 	wm_stop_locked(ifp, disable ? true : false, true);
   7321 	mutex_exit(sc->sc_core_lock);
   7322 
   7323 	/*
   7324 	 * After wm_set_stopping_flags(), it is guaranteed that
   7325 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7326 	 * However, workqueue_wait() cannot be called in
    7327 	 * wm_stop_locked() because it can sleep, so call
    7328 	 * workqueue_wait() here.
   7329 	 */
   7330 	for (int i = 0; i < sc->sc_nqueues; i++)
   7331 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7332 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
   7333 }
   7334 
   7335 static void
   7336 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7337 {
   7338 	struct wm_softc *sc = ifp->if_softc;
   7339 	struct wm_txsoft *txs;
   7340 	int i, qidx;
   7341 
   7342 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7343 		device_xname(sc->sc_dev), __func__));
   7344 	KASSERT(IFNET_LOCKED(ifp));
   7345 	KASSERT(mutex_owned(sc->sc_core_lock));
   7346 
   7347 	wm_set_stopping_flags(sc);
   7348 
   7349 	if (sc->sc_flags & WM_F_HAS_MII) {
   7350 		/* Down the MII. */
   7351 		mii_down(&sc->sc_mii);
   7352 	} else {
   7353 #if 0
   7354 		/* Should we clear PHY's status properly? */
   7355 		wm_reset(sc);
   7356 #endif
   7357 	}
   7358 
   7359 	/* Stop the transmit and receive processes. */
   7360 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7361 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7362 	sc->sc_rctl &= ~RCTL_EN;
   7363 
   7364 	/*
   7365 	 * Clear the interrupt mask to ensure the device cannot assert its
   7366 	 * interrupt line.
   7367 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7368 	 * service any currently pending or shared interrupt.
   7369 	 */
   7370 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7371 	sc->sc_icr = 0;
   7372 	if (wm_is_using_msix(sc)) {
   7373 		if (sc->sc_type != WM_T_82574) {
   7374 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7375 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7376 		} else
   7377 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7378 	}
   7379 
   7380 	/*
   7381 	 * Stop callouts after interrupts are disabled; if we have
   7382 	 * to wait for them, we will be releasing the CORE_LOCK
   7383 	 * briefly, which will unblock interrupts on the current CPU.
   7384 	 */
   7385 
   7386 	/* Stop the one second clock. */
   7387 	if (wait)
   7388 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7389 	else
   7390 		callout_stop(&sc->sc_tick_ch);
   7391 
   7392 	/* Stop the 82547 Tx FIFO stall check timer. */
   7393 	if (sc->sc_type == WM_T_82547) {
   7394 		if (wait)
   7395 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7396 		else
   7397 			callout_stop(&sc->sc_txfifo_ch);
   7398 	}
   7399 
   7400 	/* Release any queued transmit buffers. */
   7401 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7402 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7403 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7404 		struct mbuf *m;
   7405 
   7406 		mutex_enter(txq->txq_lock);
   7407 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7408 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7409 			txs = &txq->txq_soft[i];
   7410 			if (txs->txs_mbuf != NULL) {
   7411 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7412 				m_freem(txs->txs_mbuf);
   7413 				txs->txs_mbuf = NULL;
   7414 			}
   7415 		}
   7416 		/* Drain txq_interq */
   7417 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7418 			m_freem(m);
   7419 		mutex_exit(txq->txq_lock);
   7420 	}
   7421 
   7422 	/* Mark the interface as down and cancel the watchdog timer. */
   7423 	ifp->if_flags &= ~IFF_RUNNING;
   7424 	sc->sc_if_flags = ifp->if_flags;
   7425 
   7426 	if (disable) {
   7427 		for (i = 0; i < sc->sc_nqueues; i++) {
   7428 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7429 			mutex_enter(rxq->rxq_lock);
   7430 			wm_rxdrain(rxq);
   7431 			mutex_exit(rxq->rxq_lock);
   7432 		}
   7433 	}
   7434 
   7435 #if 0 /* notyet */
   7436 	if (sc->sc_type >= WM_T_82544)
   7437 		CSR_WRITE(sc, WMREG_WUC, 0);
   7438 #endif
   7439 }
   7440 
   7441 static void
   7442 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7443 {
   7444 	struct mbuf *m;
   7445 	int i;
   7446 
   7447 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7448 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7449 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7450 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7451 		    m->m_data, m->m_len, m->m_flags);
   7452 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7453 	    i, i == 1 ? "" : "s");
   7454 }
   7455 
   7456 /*
   7457  * wm_82547_txfifo_stall:
   7458  *
   7459  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7460  *	reset the FIFO pointers, and restart packet transmission.
   7461  */
   7462 static void
   7463 wm_82547_txfifo_stall(void *arg)
   7464 {
   7465 	struct wm_softc *sc = arg;
   7466 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7467 
   7468 	mutex_enter(txq->txq_lock);
   7469 
   7470 	if (txq->txq_stopping)
   7471 		goto out;
   7472 
   7473 	if (txq->txq_fifo_stall) {
   7474 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7475 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7476 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7477 			/*
   7478 			 * Packets have drained.  Stop transmitter, reset
   7479 			 * FIFO pointers, restart transmitter, and kick
   7480 			 * the packet queue.
   7481 			 */
   7482 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7483 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7484 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7485 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7486 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7487 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7488 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7489 			CSR_WRITE_FLUSH(sc);
   7490 
   7491 			txq->txq_fifo_head = 0;
   7492 			txq->txq_fifo_stall = 0;
   7493 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7494 		} else {
   7495 			/*
   7496 			 * Still waiting for packets to drain; try again in
   7497 			 * another tick.
   7498 			 */
   7499 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7500 		}
   7501 	}
   7502 
   7503 out:
   7504 	mutex_exit(txq->txq_lock);
   7505 }
   7506 
   7507 /*
   7508  * wm_82547_txfifo_bugchk:
   7509  *
   7510  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   7511  *	prevent enqueueing a packet that would wrap around the end
    7512  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   7513  *
   7514  *	We do this by checking the amount of space before the end
   7515  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7516  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7517  *	the internal FIFO pointers to the beginning, and restart
   7518  *	transmission on the interface.
   7519  */
   7520 #define	WM_FIFO_HDR		0x10
   7521 #define	WM_82547_PAD_LEN	0x3e0
   7522 static int
   7523 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7524 {
   7525 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7526 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7527 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7528 
   7529 	/* Just return if already stalled. */
   7530 	if (txq->txq_fifo_stall)
   7531 		return 1;
   7532 
   7533 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7534 		/* Stall only occurs in half-duplex mode. */
   7535 		goto send_packet;
   7536 	}
   7537 
   7538 	if (len >= WM_82547_PAD_LEN + space) {
   7539 		txq->txq_fifo_stall = 1;
   7540 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7541 		return 1;
   7542 	}
   7543 
   7544 send_packet:
   7545 	txq->txq_fifo_head += len;
   7546 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7547 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7548 
   7549 	return 0;
   7550 }
   7551 
   7552 static int
   7553 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7554 {
   7555 	int error;
   7556 
   7557 	/*
   7558 	 * Allocate the control data structures, and create and load the
   7559 	 * DMA map for it.
   7560 	 *
   7561 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7562 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7563 	 * both sets within the same 4G segment.
   7564 	 */
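         	/*
         	 * The 4G constraint is enforced by the (bus_size_t)0x100000000ULL
         	 * boundary argument passed to bus_dmamem_alloc() below: the
         	 * single segment returned will not cross a 4G boundary.
         	 */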
   7565 	if (sc->sc_type < WM_T_82544)
   7566 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7567 	else
   7568 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7569 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7570 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7571 	else
   7572 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7573 
   7574 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7575 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7576 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7577 		aprint_error_dev(sc->sc_dev,
   7578 		    "unable to allocate TX control data, error = %d\n",
   7579 		    error);
   7580 		goto fail_0;
   7581 	}
   7582 
   7583 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7584 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7585 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7586 		aprint_error_dev(sc->sc_dev,
   7587 		    "unable to map TX control data, error = %d\n", error);
   7588 		goto fail_1;
   7589 	}
   7590 
   7591 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7592 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7593 		aprint_error_dev(sc->sc_dev,
   7594 		    "unable to create TX control data DMA map, error = %d\n",
   7595 		    error);
   7596 		goto fail_2;
   7597 	}
   7598 
   7599 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7600 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7601 		aprint_error_dev(sc->sc_dev,
   7602 		    "unable to load TX control data DMA map, error = %d\n",
   7603 		    error);
   7604 		goto fail_3;
   7605 	}
   7606 
   7607 	return 0;
   7608 
   7609 fail_3:
   7610 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7611 fail_2:
   7612 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7613 	    WM_TXDESCS_SIZE(txq));
   7614 fail_1:
   7615 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7616 fail_0:
   7617 	return error;
   7618 }
   7619 
   7620 static void
   7621 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7622 {
   7623 
   7624 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7625 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7626 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7627 	    WM_TXDESCS_SIZE(txq));
   7628 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7629 }
   7630 
   7631 static int
   7632 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7633 {
   7634 	int error;
   7635 	size_t rxq_descs_size;
   7636 
   7637 	/*
   7638 	 * Allocate the control data structures, and create and load the
   7639 	 * DMA map for it.
   7640 	 *
   7641 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7642 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7643 	 * both sets within the same 4G segment.
   7644 	 */
   7645 	rxq->rxq_ndesc = WM_NRXDESC;
   7646 	if (sc->sc_type == WM_T_82574)
   7647 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7648 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7649 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7650 	else
   7651 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7652 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7653 
   7654 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7655 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7656 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7657 		aprint_error_dev(sc->sc_dev,
   7658 		    "unable to allocate RX control data, error = %d\n",
   7659 		    error);
   7660 		goto fail_0;
   7661 	}
   7662 
   7663 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7664 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7665 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7666 		aprint_error_dev(sc->sc_dev,
   7667 		    "unable to map RX control data, error = %d\n", error);
   7668 		goto fail_1;
   7669 	}
   7670 
   7671 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7672 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7673 		aprint_error_dev(sc->sc_dev,
   7674 		    "unable to create RX control data DMA map, error = %d\n",
   7675 		    error);
   7676 		goto fail_2;
   7677 	}
   7678 
   7679 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7680 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7681 		aprint_error_dev(sc->sc_dev,
   7682 		    "unable to load RX control data DMA map, error = %d\n",
   7683 		    error);
   7684 		goto fail_3;
   7685 	}
   7686 
   7687 	return 0;
   7688 
   7689  fail_3:
   7690 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7691  fail_2:
   7692 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7693 	    rxq_descs_size);
   7694  fail_1:
   7695 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7696  fail_0:
   7697 	return error;
   7698 }
   7699 
   7700 static void
   7701 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7702 {
   7703 
   7704 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7705 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7706 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7707 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7708 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7709 }
   7710 
   7711 
   7712 static int
   7713 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7714 {
   7715 	int i, error;
   7716 
   7717 	/* Create the transmit buffer DMA maps. */
   7718 	WM_TXQUEUELEN(txq) =
   7719 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7720 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7721 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7722 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7723 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7724 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7725 			aprint_error_dev(sc->sc_dev,
   7726 			    "unable to create Tx DMA map %d, error = %d\n",
   7727 			    i, error);
   7728 			goto fail;
   7729 		}
   7730 	}
   7731 
   7732 	return 0;
   7733 
   7734 fail:
   7735 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7736 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7737 			bus_dmamap_destroy(sc->sc_dmat,
   7738 			    txq->txq_soft[i].txs_dmamap);
   7739 	}
   7740 	return error;
   7741 }
   7742 
   7743 static void
   7744 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7745 {
   7746 	int i;
   7747 
   7748 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7749 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7750 			bus_dmamap_destroy(sc->sc_dmat,
   7751 			    txq->txq_soft[i].txs_dmamap);
   7752 	}
   7753 }
   7754 
   7755 static int
   7756 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7757 {
   7758 	int i, error;
   7759 
   7760 	/* Create the receive buffer DMA maps. */
   7761 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7762 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7763 			    MCLBYTES, 0, 0,
   7764 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7765 			aprint_error_dev(sc->sc_dev,
   7766 			    "unable to create Rx DMA map %d error = %d\n",
   7767 			    i, error);
   7768 			goto fail;
   7769 		}
   7770 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7771 	}
   7772 
   7773 	return 0;
   7774 
   7775  fail:
   7776 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7777 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7778 			bus_dmamap_destroy(sc->sc_dmat,
   7779 			    rxq->rxq_soft[i].rxs_dmamap);
   7780 	}
   7781 	return error;
   7782 }
   7783 
   7784 static void
   7785 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7786 {
   7787 	int i;
   7788 
   7789 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7790 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7791 			bus_dmamap_destroy(sc->sc_dmat,
   7792 			    rxq->rxq_soft[i].rxs_dmamap);
   7793 	}
   7794 }
   7795 
   7796 /*
    7797  * wm_alloc_txrx_queues:
   7798  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7799  */
   7800 static int
   7801 wm_alloc_txrx_queues(struct wm_softc *sc)
   7802 {
   7803 	int i, error, tx_done, rx_done;
   7804 
   7805 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7806 	    KM_SLEEP);
   7807 	if (sc->sc_queue == NULL) {
    7808 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7809 		error = ENOMEM;
   7810 		goto fail_0;
   7811 	}
   7812 
   7813 	/* For transmission */
   7814 	error = 0;
   7815 	tx_done = 0;
   7816 	for (i = 0; i < sc->sc_nqueues; i++) {
   7817 #ifdef WM_EVENT_COUNTERS
   7818 		int j;
   7819 		const char *xname;
   7820 #endif
   7821 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7822 		txq->txq_sc = sc;
   7823 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7824 
   7825 		error = wm_alloc_tx_descs(sc, txq);
   7826 		if (error)
   7827 			break;
   7828 		error = wm_alloc_tx_buffer(sc, txq);
   7829 		if (error) {
   7830 			wm_free_tx_descs(sc, txq);
   7831 			break;
   7832 		}
   7833 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7834 		if (txq->txq_interq == NULL) {
   7835 			wm_free_tx_descs(sc, txq);
   7836 			wm_free_tx_buffer(sc, txq);
   7837 			error = ENOMEM;
   7838 			break;
   7839 		}
   7840 
   7841 #ifdef WM_EVENT_COUNTERS
   7842 		xname = device_xname(sc->sc_dev);
   7843 
   7844 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7845 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7846 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7847 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7848 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7849 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7850 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7851 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7852 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7853 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7854 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7855 
   7856 		for (j = 0; j < WM_NTXSEGS; j++) {
   7857 			snprintf(txq->txq_txseg_evcnt_names[j],
   7858 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7859 			    "txq%02dtxseg%d", i, j);
   7860 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7861 			    EVCNT_TYPE_MISC,
   7862 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7863 		}
   7864 
   7865 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7866 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7867 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7868 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7869 		/* Only for 82544 (and earlier?) */
   7870 		if (sc->sc_type <= WM_T_82544)
   7871 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7872 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7873 #endif /* WM_EVENT_COUNTERS */
   7874 
   7875 		tx_done++;
   7876 	}
   7877 	if (error)
   7878 		goto fail_1;
   7879 
   7880 	/* For receive */
   7881 	error = 0;
   7882 	rx_done = 0;
   7883 	for (i = 0; i < sc->sc_nqueues; i++) {
   7884 #ifdef WM_EVENT_COUNTERS
   7885 		const char *xname;
   7886 #endif
   7887 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7888 		rxq->rxq_sc = sc;
   7889 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7890 
   7891 		error = wm_alloc_rx_descs(sc, rxq);
   7892 		if (error)
   7893 			break;
   7894 
   7895 		error = wm_alloc_rx_buffer(sc, rxq);
   7896 		if (error) {
   7897 			wm_free_rx_descs(sc, rxq);
   7898 			break;
   7899 		}
   7900 
   7901 #ifdef WM_EVENT_COUNTERS
   7902 		xname = device_xname(sc->sc_dev);
   7903 
   7904 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7905 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7906 
   7907 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7908 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7909 #endif /* WM_EVENT_COUNTERS */
   7910 
   7911 		rx_done++;
   7912 	}
   7913 	if (error)
   7914 		goto fail_2;
   7915 
   7916 	return 0;
   7917 
   7918 fail_2:
   7919 	for (i = 0; i < rx_done; i++) {
   7920 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7921 		wm_free_rx_buffer(sc, rxq);
   7922 		wm_free_rx_descs(sc, rxq);
   7923 		if (rxq->rxq_lock)
   7924 			mutex_obj_free(rxq->rxq_lock);
   7925 	}
   7926 fail_1:
   7927 	for (i = 0; i < tx_done; i++) {
   7928 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7929 		pcq_destroy(txq->txq_interq);
   7930 		wm_free_tx_buffer(sc, txq);
   7931 		wm_free_tx_descs(sc, txq);
   7932 		if (txq->txq_lock)
   7933 			mutex_obj_free(txq->txq_lock);
   7934 	}
   7935 
   7936 	kmem_free(sc->sc_queue,
   7937 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7938 fail_0:
   7939 	return error;
   7940 }
   7941 
   7942 /*
    7943  * wm_free_txrx_queues:
   7944  *	Free {tx,rx}descs and {tx,rx} buffers
   7945  */
   7946 static void
   7947 wm_free_txrx_queues(struct wm_softc *sc)
   7948 {
   7949 	int i;
   7950 
   7951 	for (i = 0; i < sc->sc_nqueues; i++) {
   7952 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7953 
   7954 #ifdef WM_EVENT_COUNTERS
   7955 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7956 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7957 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7958 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7959 #endif /* WM_EVENT_COUNTERS */
   7960 
   7961 		wm_free_rx_buffer(sc, rxq);
   7962 		wm_free_rx_descs(sc, rxq);
   7963 		if (rxq->rxq_lock)
   7964 			mutex_obj_free(rxq->rxq_lock);
   7965 	}
   7966 
   7967 	for (i = 0; i < sc->sc_nqueues; i++) {
   7968 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7969 		struct mbuf *m;
   7970 #ifdef WM_EVENT_COUNTERS
   7971 		int j;
   7972 
   7973 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7974 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7975 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7976 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7977 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7978 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7979 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7980 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7981 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7982 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7983 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7984 
   7985 		for (j = 0; j < WM_NTXSEGS; j++)
   7986 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7987 
   7988 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7989 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7990 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7991 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7992 		if (sc->sc_type <= WM_T_82544)
   7993 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7994 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7995 #endif /* WM_EVENT_COUNTERS */
   7996 
   7997 		/* Drain txq_interq */
   7998 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7999 			m_freem(m);
   8000 		pcq_destroy(txq->txq_interq);
   8001 
   8002 		wm_free_tx_buffer(sc, txq);
   8003 		wm_free_tx_descs(sc, txq);
   8004 		if (txq->txq_lock)
   8005 			mutex_obj_free(txq->txq_lock);
   8006 	}
   8007 
   8008 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   8009 }
   8010 
   8011 static void
   8012 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8013 {
   8014 
   8015 	KASSERT(mutex_owned(txq->txq_lock));
   8016 
   8017 	/* Initialize the transmit descriptor ring. */
   8018 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   8019 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   8020 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8021 	txq->txq_free = WM_NTXDESC(txq);
   8022 	txq->txq_next = 0;
   8023 }
   8024 
   8025 static void
   8026 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8027     struct wm_txqueue *txq)
   8028 {
   8029 
   8030 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8031 		device_xname(sc->sc_dev), __func__));
   8032 	KASSERT(mutex_owned(txq->txq_lock));
   8033 
   8034 	if (sc->sc_type < WM_T_82543) {
   8035 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   8036 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   8037 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   8038 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   8039 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   8040 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   8041 	} else {
   8042 		int qid = wmq->wmq_id;
   8043 
   8044 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   8045 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   8046 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   8047 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   8048 
   8049 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8050 			/*
   8051 			 * Don't write TDT before TCTL.EN is set.
    8052 			 * See the documentation.
   8053 			 */
   8054 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   8055 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   8056 			    | TXDCTL_WTHRESH(0));
   8057 		else {
   8058 			/* XXX should update with AIM? */
   8059 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   8060 			if (sc->sc_type >= WM_T_82540) {
    8061 				/* TADV should hold the same value as TIDV */
   8062 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   8063 			}
   8064 
   8065 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   8066 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   8067 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   8068 		}
   8069 	}
   8070 }
   8071 
   8072 static void
   8073 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   8074 {
   8075 	int i;
   8076 
   8077 	KASSERT(mutex_owned(txq->txq_lock));
   8078 
   8079 	/* Initialize the transmit job descriptors. */
   8080 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   8081 		txq->txq_soft[i].txs_mbuf = NULL;
   8082 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   8083 	txq->txq_snext = 0;
   8084 	txq->txq_sdirty = 0;
   8085 }
   8086 
   8087 static void
   8088 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8089     struct wm_txqueue *txq)
   8090 {
   8091 
   8092 	KASSERT(mutex_owned(txq->txq_lock));
   8093 
   8094 	/*
   8095 	 * Set up some register offsets that are different between
   8096 	 * the i82542 and the i82543 and later chips.
   8097 	 */
   8098 	if (sc->sc_type < WM_T_82543)
   8099 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   8100 	else
   8101 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   8102 
   8103 	wm_init_tx_descs(sc, txq);
   8104 	wm_init_tx_regs(sc, wmq, txq);
   8105 	wm_init_tx_buffer(sc, txq);
   8106 
   8107 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   8108 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   8109 
   8110 	txq->txq_sending = false;
   8111 }
   8112 
   8113 static void
   8114 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   8115     struct wm_rxqueue *rxq)
   8116 {
   8117 
   8118 	KASSERT(mutex_owned(rxq->rxq_lock));
   8119 
   8120 	/*
   8121 	 * Initialize the receive descriptor and receive job
   8122 	 * descriptor rings.
   8123 	 */
   8124 	if (sc->sc_type < WM_T_82543) {
   8125 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   8126 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   8127 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   8128 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8129 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   8130 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   8131 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   8132 
   8133 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   8134 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   8135 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   8136 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   8137 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   8138 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   8139 	} else {
   8140 		int qid = wmq->wmq_id;
   8141 
   8142 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   8143 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   8144 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   8145 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   8146 
   8147 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8148 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   8149 				panic("%s: MCLBYTES %d unsupported for 82575 "
   8150 				    "or higher\n", __func__, MCLBYTES);
   8151 
   8152 			/*
   8153 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   8154 			 * only.
   8155 			 */
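         			/*
         			 * The BSIZEPKT field is in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes; with the
         			 * usual 1 KB granularity, MCLBYTES == 2048
         			 * programs a 2 KB receive buffer.
         			 */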
   8156 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   8157 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   8158 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   8159 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   8160 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   8161 			    | RXDCTL_WTHRESH(1));
   8162 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8163 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8164 		} else {
   8165 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   8166 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   8167 			/* XXX should update with AIM? */
   8168 			CSR_WRITE(sc, WMREG_RDTR,
   8169 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    8170 			/* RADV MUST hold the same value as RDTR */
   8171 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   8172 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   8173 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   8174 		}
   8175 	}
   8176 }
   8177 
   8178 static int
   8179 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   8180 {
   8181 	struct wm_rxsoft *rxs;
   8182 	int error, i;
   8183 
   8184 	KASSERT(mutex_owned(rxq->rxq_lock));
   8185 
   8186 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   8187 		rxs = &rxq->rxq_soft[i];
   8188 		if (rxs->rxs_mbuf == NULL) {
   8189 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   8190 				log(LOG_ERR, "%s: unable to allocate or map "
   8191 				    "rx buffer %d, error = %d\n",
   8192 				    device_xname(sc->sc_dev), i, error);
   8193 				/*
   8194 				 * XXX Should attempt to run with fewer receive
   8195 				 * XXX buffers instead of just failing.
   8196 				 */
   8197 				wm_rxdrain(rxq);
   8198 				return ENOMEM;
   8199 			}
   8200 		} else {
   8201 			/*
   8202 			 * For 82575 and 82576, the RX descriptors must be
   8203 			 * initialized after the setting of RCTL.EN in
   8204 			 * wm_set_filter()
   8205 			 */
   8206 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   8207 				wm_init_rxdesc(rxq, i);
   8208 		}
   8209 	}
   8210 	rxq->rxq_ptr = 0;
   8211 	rxq->rxq_discard = 0;
   8212 	WM_RXCHAIN_RESET(rxq);
   8213 
   8214 	return 0;
   8215 }
   8216 
   8217 static int
   8218 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   8219     struct wm_rxqueue *rxq)
   8220 {
   8221 
   8222 	KASSERT(mutex_owned(rxq->rxq_lock));
   8223 
   8224 	/*
   8225 	 * Set up some register offsets that are different between
   8226 	 * the i82542 and the i82543 and later chips.
   8227 	 */
   8228 	if (sc->sc_type < WM_T_82543)
   8229 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8230 	else
   8231 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8232 
   8233 	wm_init_rx_regs(sc, wmq, rxq);
   8234 	return wm_init_rx_buffer(sc, rxq);
   8235 }
   8236 
   8237 /*
    8238  * wm_init_txrx_queues:
   8239  *	Initialize {tx,rx}descs and {tx,rx} buffers
   8240  */
   8241 static int
   8242 wm_init_txrx_queues(struct wm_softc *sc)
   8243 {
   8244 	int i, error = 0;
   8245 
   8246 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8247 		device_xname(sc->sc_dev), __func__));
   8248 
   8249 	for (i = 0; i < sc->sc_nqueues; i++) {
   8250 		struct wm_queue *wmq = &sc->sc_queue[i];
   8251 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8252 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8253 
   8254 		/*
   8255 		 * TODO
    8256 		 * Currently, a constant value is used instead of AIM.
    8257 		 * Furthermore, the interrupt interval for multiqueue
    8258 		 * (which uses polling mode) is shorter than the default.
    8259 		 * More tuning, and AIM itself, are still required.
   8260 		 */
   8261 		if (wm_is_using_multiqueue(sc))
   8262 			wmq->wmq_itr = 50;
   8263 		else
   8264 			wmq->wmq_itr = sc->sc_itr_init;
   8265 		wmq->wmq_set_itr = true;
   8266 
   8267 		mutex_enter(txq->txq_lock);
   8268 		wm_init_tx_queue(sc, wmq, txq);
   8269 		mutex_exit(txq->txq_lock);
   8270 
   8271 		mutex_enter(rxq->rxq_lock);
   8272 		error = wm_init_rx_queue(sc, wmq, rxq);
   8273 		mutex_exit(rxq->rxq_lock);
   8274 		if (error)
   8275 			break;
   8276 	}
   8277 
   8278 	return error;
   8279 }
   8280 
   8281 /*
   8282  * wm_tx_offload:
   8283  *
   8284  *	Set up TCP/IP checksumming parameters for the
   8285  *	specified packet.
   8286  */
   8287 static void
   8288 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8289     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8290 {
   8291 	struct mbuf *m0 = txs->txs_mbuf;
   8292 	struct livengood_tcpip_ctxdesc *t;
   8293 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8294 	uint32_t ipcse;
   8295 	struct ether_header *eh;
   8296 	int offset, iphl;
   8297 	uint8_t fields;
   8298 
   8299 	/*
   8300 	 * XXX It would be nice if the mbuf pkthdr had offset
   8301 	 * fields for the protocol headers.
   8302 	 */
   8303 
   8304 	eh = mtod(m0, struct ether_header *);
   8305 	switch (htons(eh->ether_type)) {
   8306 	case ETHERTYPE_IP:
   8307 	case ETHERTYPE_IPV6:
   8308 		offset = ETHER_HDR_LEN;
   8309 		break;
   8310 
   8311 	case ETHERTYPE_VLAN:
   8312 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8313 		break;
   8314 
   8315 	default:
   8316 		/* Don't support this protocol or encapsulation. */
   8317 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8318 		txq->txq_last_hw_ipcs = 0;
   8319 		txq->txq_last_hw_tucs = 0;
   8320 		*fieldsp = 0;
   8321 		*cmdp = 0;
   8322 		return;
   8323 	}
   8324 
   8325 	if ((m0->m_pkthdr.csum_flags &
   8326 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8327 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8328 	} else
   8329 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8330 
   8331 	ipcse = offset + iphl - 1;
   8332 
   8333 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8334 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8335 	seg = 0;
   8336 	fields = 0;
   8337 
   8338 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8339 		int hlen = offset + iphl;
   8340 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8341 
   8342 		if (__predict_false(m0->m_len <
   8343 				    (hlen + sizeof(struct tcphdr)))) {
   8344 			/*
   8345 			 * TCP/IP headers are not in the first mbuf; we need
   8346 			 * to do this the slow and painful way. Let's just
   8347 			 * hope this doesn't happen very often.
   8348 			 */
   8349 			struct tcphdr th;
   8350 
   8351 			WM_Q_EVCNT_INCR(txq, tsopain);
   8352 
   8353 			m_copydata(m0, hlen, sizeof(th), &th);
   8354 			if (v4) {
   8355 				struct ip ip;
   8356 
   8357 				m_copydata(m0, offset, sizeof(ip), &ip);
   8358 				ip.ip_len = 0;
   8359 				m_copyback(m0,
   8360 				    offset + offsetof(struct ip, ip_len),
   8361 				    sizeof(ip.ip_len), &ip.ip_len);
   8362 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8363 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8364 			} else {
   8365 				struct ip6_hdr ip6;
   8366 
   8367 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8368 				ip6.ip6_plen = 0;
   8369 				m_copyback(m0,
   8370 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8371 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8372 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8373 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8374 			}
   8375 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8376 			    sizeof(th.th_sum), &th.th_sum);
   8377 
   8378 			hlen += th.th_off << 2;
   8379 		} else {
   8380 			/*
   8381 			 * TCP/IP headers are in the first mbuf; we can do
   8382 			 * this the easy way.
   8383 			 */
   8384 			struct tcphdr *th;
   8385 
   8386 			if (v4) {
   8387 				struct ip *ip =
   8388 				    (void *)(mtod(m0, char *) + offset);
   8389 				th = (void *)(mtod(m0, char *) + hlen);
   8390 
   8391 				ip->ip_len = 0;
   8392 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8393 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8394 			} else {
   8395 				struct ip6_hdr *ip6 =
   8396 				    (void *)(mtod(m0, char *) + offset);
   8397 				th = (void *)(mtod(m0, char *) + hlen);
   8398 
   8399 				ip6->ip6_plen = 0;
   8400 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8401 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8402 			}
   8403 			hlen += th->th_off << 2;
   8404 		}
   8405 
   8406 		if (v4) {
   8407 			WM_Q_EVCNT_INCR(txq, tso);
   8408 			cmdlen |= WTX_TCPIP_CMD_IP;
   8409 		} else {
   8410 			WM_Q_EVCNT_INCR(txq, tso6);
   8411 			ipcse = 0;
   8412 		}
   8413 		cmd |= WTX_TCPIP_CMD_TSE;
   8414 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8415 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8416 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8417 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8418 	}
   8419 
   8420 	/*
   8421 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8422 	 * offload feature, if we load the context descriptor, we
   8423 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8424 	 */
   8425 
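         	/*
         	 * Worked example (illustrative, plain IPv4/TCP without VLAN):
         	 * IPCSS = 14 (start of the IP header),
         	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, and
         	 * IPCSE = 14 + 20 - 1 = 33 for a 20-byte IP header.
         	 */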
   8426 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8427 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8428 	    WTX_TCPIP_IPCSE(ipcse);
   8429 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8430 		WM_Q_EVCNT_INCR(txq, ipsum);
   8431 		fields |= WTX_IXSM;
   8432 	}
   8433 
   8434 	offset += iphl;
   8435 
   8436 	if (m0->m_pkthdr.csum_flags &
   8437 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8438 		WM_Q_EVCNT_INCR(txq, tusum);
   8439 		fields |= WTX_TXSM;
   8440 		tucs = WTX_TCPIP_TUCSS(offset) |
   8441 		    WTX_TCPIP_TUCSO(offset +
   8442 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8443 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8444 	} else if ((m0->m_pkthdr.csum_flags &
   8445 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8446 		WM_Q_EVCNT_INCR(txq, tusum6);
   8447 		fields |= WTX_TXSM;
   8448 		tucs = WTX_TCPIP_TUCSS(offset) |
   8449 		    WTX_TCPIP_TUCSO(offset +
   8450 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8451 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8452 	} else {
   8453 		/* Just initialize it to a valid TCP context. */
   8454 		tucs = WTX_TCPIP_TUCSS(offset) |
   8455 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8456 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8457 	}
   8458 
   8459 	*cmdp = cmd;
   8460 	*fieldsp = fields;
   8461 
   8462 	/*
    8463 	 * We don't have to write a context descriptor for every packet,
    8464 	 * except on the 82574. On the 82574, we must write a context
    8465 	 * descriptor for every packet when we use two descriptor queues.
    8466 	 *
    8467 	 * The 82574L can only remember the *last* context used,
    8468 	 * regardless of the queue it was used for.  We cannot reuse
   8469 	 * contexts on this hardware platform and must generate a new
   8470 	 * context every time.  82574L hardware spec, section 7.2.6,
   8471 	 * second note.
   8472 	 */
   8473 	if (sc->sc_nqueues < 2) {
   8474 		/*
    8475 		 * Setting up a new checksum offload context for every
    8476 		 * frame takes a lot of processing time in hardware.
    8477 		 * This also reduces performance a lot for small-sized
    8478 		 * frames, so avoid it if the driver can reuse a
    8479 		 * previously configured checksum offload context.
    8480 		 * For TSO, in theory we could reuse the same TSO context
    8481 		 * only if the frame is the same type (IP/TCP) and has the
    8482 		 * same MSS. However, checking whether a frame has the same
    8483 		 * IP/TCP structure is hard, so just ignore that and always
    8484 		 * re-establish a new TSO context.
   8485 		 */
   8486 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8487 		    == 0) {
   8488 			if (txq->txq_last_hw_cmd == cmd &&
   8489 			    txq->txq_last_hw_fields == fields &&
   8490 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8491 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8492 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8493 				return;
   8494 			}
   8495 		}
   8496 
   8497 		txq->txq_last_hw_cmd = cmd;
   8498 		txq->txq_last_hw_fields = fields;
   8499 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8500 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8501 	}
   8502 
   8503 	/* Fill in the context descriptor. */
   8504 	t = (struct livengood_tcpip_ctxdesc *)
   8505 	    &txq->txq_descs[txq->txq_next];
   8506 	t->tcpip_ipcs = htole32(ipcs);
   8507 	t->tcpip_tucs = htole32(tucs);
   8508 	t->tcpip_cmdlen = htole32(cmdlen);
   8509 	t->tcpip_seg = htole32(seg);
   8510 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8511 
   8512 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8513 	txs->txs_ndesc++;
   8514 }
   8515 
   8516 static inline int
   8517 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8518 {
   8519 	struct wm_softc *sc = ifp->if_softc;
   8520 	u_int cpuid = cpu_index(curcpu());
   8521 
   8522 	/*
    8523 	 * Currently, a simple distribution strategy (see the example
    8524 	 * below).  TODO:
    8525 	 * distribute by flowid (the RSS hash value).
   8526 	 */
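         	/*
         	 * Illustrative example (hypothetical values): with ncpu = 8,
         	 * sc_nqueues = 4 and sc_affinity_offset = 2, a packet sent from
         	 * CPU 5 maps to queue ((5 + 8 - 2) % 8) % 4 = 3.
         	 */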
   8527 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8528 }
   8529 
   8530 static inline bool
   8531 wm_linkdown_discard(struct wm_txqueue *txq)
   8532 {
   8533 
   8534 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8535 		return true;
   8536 
   8537 	return false;
   8538 }
   8539 
   8540 /*
   8541  * wm_start:		[ifnet interface function]
   8542  *
   8543  *	Start packet transmission on the interface.
   8544  */
   8545 static void
   8546 wm_start(struct ifnet *ifp)
   8547 {
   8548 	struct wm_softc *sc = ifp->if_softc;
   8549 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8550 
   8551 	KASSERT(if_is_mpsafe(ifp));
   8552 	/*
   8553 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8554 	 */
   8555 
   8556 	mutex_enter(txq->txq_lock);
   8557 	if (!txq->txq_stopping)
   8558 		wm_start_locked(ifp);
   8559 	mutex_exit(txq->txq_lock);
   8560 }
   8561 
   8562 static void
   8563 wm_start_locked(struct ifnet *ifp)
   8564 {
   8565 	struct wm_softc *sc = ifp->if_softc;
   8566 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8567 
   8568 	wm_send_common_locked(ifp, txq, false);
   8569 }
   8570 
   8571 static int
   8572 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8573 {
   8574 	int qid;
   8575 	struct wm_softc *sc = ifp->if_softc;
   8576 	struct wm_txqueue *txq;
   8577 
   8578 	qid = wm_select_txqueue(ifp, m);
   8579 	txq = &sc->sc_queue[qid].wmq_txq;
   8580 
   8581 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8582 		m_freem(m);
   8583 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8584 		return ENOBUFS;
   8585 	}
   8586 
   8587 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8588 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8589 	if (m->m_flags & M_MCAST)
   8590 		if_statinc_ref(nsr, if_omcasts);
   8591 	IF_STAT_PUTREF(ifp);
   8592 
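         	/*
         	 * If mutex_tryenter() below fails, the packet just enqueued is
         	 * still dequeued later by wm_deferred_start_locked(); see the
         	 * detailed comment in wm_nq_transmit().
         	 */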
   8593 	if (mutex_tryenter(txq->txq_lock)) {
   8594 		if (!txq->txq_stopping)
   8595 			wm_transmit_locked(ifp, txq);
   8596 		mutex_exit(txq->txq_lock);
   8597 	}
   8598 
   8599 	return 0;
   8600 }
   8601 
   8602 static void
   8603 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8604 {
   8605 
   8606 	wm_send_common_locked(ifp, txq, true);
   8607 }
   8608 
   8609 static void
   8610 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8611     bool is_transmit)
   8612 {
   8613 	struct wm_softc *sc = ifp->if_softc;
   8614 	struct mbuf *m0;
   8615 	struct wm_txsoft *txs;
   8616 	bus_dmamap_t dmamap;
   8617 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8618 	bus_addr_t curaddr;
   8619 	bus_size_t seglen, curlen;
   8620 	uint32_t cksumcmd;
   8621 	uint8_t cksumfields;
   8622 	bool remap = true;
   8623 
   8624 	KASSERT(mutex_owned(txq->txq_lock));
   8625 	KASSERT(!txq->txq_stopping);
   8626 
   8627 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8628 		return;
   8629 
   8630 	if (__predict_false(wm_linkdown_discard(txq))) {
   8631 		do {
   8632 			if (is_transmit)
   8633 				m0 = pcq_get(txq->txq_interq);
   8634 			else
   8635 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8636 			/*
    8637 			 * Increment the sent-packet counter even though the
    8638 			 * packet is discarded because the PHY link is down.
   8639 			 */
   8640 			if (m0 != NULL) {
   8641 				if_statinc(ifp, if_opackets);
   8642 				m_freem(m0);
   8643 			}
   8644 		} while (m0 != NULL);
   8645 		return;
   8646 	}
   8647 
   8648 	/* Remember the previous number of free descriptors. */
   8649 	ofree = txq->txq_free;
   8650 
   8651 	/*
   8652 	 * Loop through the send queue, setting up transmit descriptors
   8653 	 * until we drain the queue, or use up all available transmit
   8654 	 * descriptors.
   8655 	 */
   8656 	for (;;) {
   8657 		m0 = NULL;
   8658 
   8659 		/* Get a work queue entry. */
   8660 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8661 			wm_txeof(txq, UINT_MAX);
   8662 			if (txq->txq_sfree == 0) {
   8663 				DPRINTF(sc, WM_DEBUG_TX,
   8664 				    ("%s: TX: no free job descriptors\n",
   8665 					device_xname(sc->sc_dev)));
   8666 				WM_Q_EVCNT_INCR(txq, txsstall);
   8667 				break;
   8668 			}
   8669 		}
   8670 
   8671 		/* Grab a packet off the queue. */
   8672 		if (is_transmit)
   8673 			m0 = pcq_get(txq->txq_interq);
   8674 		else
   8675 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8676 		if (m0 == NULL)
   8677 			break;
   8678 
   8679 		DPRINTF(sc, WM_DEBUG_TX,
   8680 		    ("%s: TX: have packet to transmit: %p\n",
   8681 			device_xname(sc->sc_dev), m0));
   8682 
   8683 		txs = &txq->txq_soft[txq->txq_snext];
   8684 		dmamap = txs->txs_dmamap;
   8685 
   8686 		use_tso = (m0->m_pkthdr.csum_flags &
   8687 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8688 
   8689 		/*
   8690 		 * So says the Linux driver:
   8691 		 * The controller does a simple calculation to make sure
   8692 		 * there is enough room in the FIFO before initiating the
   8693 		 * DMA for each buffer. The calc is:
   8694 		 *	4 = ceil(buffer len / MSS)
   8695 		 * To make sure we don't overrun the FIFO, adjust the max
   8696 		 * buffer len if the MSS drops.
   8697 		 */
   8698 		dmamap->dm_maxsegsz =
   8699 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8700 		    ? m0->m_pkthdr.segsz << 2
   8701 		    : WTX_MAX_LEN;
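         		/*
         		 * Illustrative example: with a typical TCP MSS of 1448
         		 * bytes, the per-segment cap becomes 1448 << 2 = 5792
         		 * bytes, i.e. each DMA buffer spans at most four
         		 * MSS-sized chunks, matching the "4 = ceil(buffer
         		 * len / MSS)" calculation quoted above.
         		 */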
   8702 
   8703 		/*
   8704 		 * Load the DMA map.  If this fails, the packet either
   8705 		 * didn't fit in the allotted number of segments, or we
   8706 		 * were short on resources.  For the too-many-segments
   8707 		 * case, we simply report an error and drop the packet,
   8708 		 * since we can't sanely copy a jumbo packet to a single
   8709 		 * buffer.
   8710 		 */
   8711 retry:
   8712 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8713 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8714 		if (__predict_false(error)) {
   8715 			if (error == EFBIG) {
   8716 				if (remap == true) {
   8717 					struct mbuf *m;
   8718 
   8719 					remap = false;
   8720 					m = m_defrag(m0, M_NOWAIT);
   8721 					if (m != NULL) {
   8722 						WM_Q_EVCNT_INCR(txq, defrag);
   8723 						m0 = m;
   8724 						goto retry;
   8725 					}
   8726 				}
   8727 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8728 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8729 				    "DMA segments, dropping...\n",
   8730 				    device_xname(sc->sc_dev));
   8731 				wm_dump_mbuf_chain(sc, m0);
   8732 				m_freem(m0);
   8733 				continue;
   8734 			}
   8735 			/* Short on resources, just stop for now. */
   8736 			DPRINTF(sc, WM_DEBUG_TX,
   8737 			    ("%s: TX: dmamap load failed: %d\n",
   8738 				device_xname(sc->sc_dev), error));
   8739 			break;
   8740 		}
   8741 
   8742 		segs_needed = dmamap->dm_nsegs;
   8743 		if (use_tso) {
   8744 			/* For sentinel descriptor; see below. */
   8745 			segs_needed++;
   8746 		}
   8747 
   8748 		/*
   8749 		 * Ensure we have enough descriptors free to describe
   8750 		 * the packet. Note, we always reserve one descriptor
   8751 		 * at the end of the ring due to the semantics of the
   8752 		 * TDT register, plus one more in the event we need
   8753 		 * to load offload context.
   8754 		 */
   8755 		if (segs_needed > txq->txq_free - 2) {
   8756 			/*
   8757 			 * Not enough free descriptors to transmit this
   8758 			 * packet.  We haven't committed anything yet,
   8759 			 * so just unload the DMA map, put the packet
    8760 			 * back on the queue, and punt. Notify the upper
   8761 			 * layer that there are no more slots left.
   8762 			 */
   8763 			DPRINTF(sc, WM_DEBUG_TX,
   8764 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8765 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8766 				segs_needed, txq->txq_free - 1));
   8767 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8768 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8769 			WM_Q_EVCNT_INCR(txq, txdstall);
   8770 			break;
   8771 		}
   8772 
   8773 		/*
   8774 		 * Check for 82547 Tx FIFO bug. We need to do this
   8775 		 * once we know we can transmit the packet, since we
   8776 		 * do some internal FIFO space accounting here.
   8777 		 */
   8778 		if (sc->sc_type == WM_T_82547 &&
   8779 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8780 			DPRINTF(sc, WM_DEBUG_TX,
   8781 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8782 				device_xname(sc->sc_dev)));
   8783 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8784 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8785 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8786 			break;
   8787 		}
   8788 
   8789 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8790 
   8791 		DPRINTF(sc, WM_DEBUG_TX,
   8792 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8793 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8794 
   8795 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8796 
   8797 		/*
   8798 		 * Store a pointer to the packet so that we can free it
   8799 		 * later.
   8800 		 *
   8801 		 * Initially, we consider the number of descriptors the
    8802 		 * packet uses to be the number of DMA segments.  This may be
   8803 		 * incremented by 1 if we do checksum offload (a descriptor
   8804 		 * is used to set the checksum context).
   8805 		 */
   8806 		txs->txs_mbuf = m0;
   8807 		txs->txs_firstdesc = txq->txq_next;
   8808 		txs->txs_ndesc = segs_needed;
   8809 
   8810 		/* Set up offload parameters for this packet. */
   8811 		if (m0->m_pkthdr.csum_flags &
   8812 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8813 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8814 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8815 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8816 		} else {
   8817 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8818 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8819 			cksumcmd = 0;
   8820 			cksumfields = 0;
   8821 		}
   8822 
   8823 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8824 
   8825 		/* Sync the DMA map. */
   8826 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8827 		    BUS_DMASYNC_PREWRITE);
   8828 
   8829 		/* Initialize the transmit descriptor. */
   8830 		for (nexttx = txq->txq_next, seg = 0;
   8831 		     seg < dmamap->dm_nsegs; seg++) {
   8832 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8833 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8834 			     seglen != 0;
   8835 			     curaddr += curlen, seglen -= curlen,
   8836 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8837 				curlen = seglen;
   8838 
   8839 				/*
   8840 				 * So says the Linux driver:
   8841 				 * Work around for premature descriptor
    8842 				 * Workaround for premature descriptor
   8843 				 * 4-byte sentinel descriptor.
   8844 				 */
   8845 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8846 				    curlen > 8)
   8847 					curlen -= 4;
   8848 
   8849 				wm_set_dma_addr(
   8850 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8851 				txq->txq_descs[nexttx].wtx_cmdlen
   8852 				    = htole32(cksumcmd | curlen);
   8853 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8854 				    = 0;
   8855 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8856 				    = cksumfields;
    8857 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   8858 				lasttx = nexttx;
   8859 
   8860 				DPRINTF(sc, WM_DEBUG_TX,
   8861 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8862 					"len %#04zx\n",
   8863 					device_xname(sc->sc_dev), nexttx,
   8864 					(uint64_t)curaddr, curlen));
   8865 			}
   8866 		}
   8867 
   8868 		KASSERT(lasttx != -1);
   8869 
   8870 		/*
   8871 		 * Set up the command byte on the last descriptor of
   8872 		 * the packet. If we're in the interrupt delay window,
   8873 		 * delay the interrupt.
   8874 		 */
   8875 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8876 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8877 
   8878 		/*
   8879 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8880 		 * up the descriptor to encapsulate the packet for us.
   8881 		 *
   8882 		 * This is only valid on the last descriptor of the packet.
   8883 		 */
   8884 		if (vlan_has_tag(m0)) {
   8885 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8886 			    htole32(WTX_CMD_VLE);
   8887 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8888 			    = htole16(vlan_get_tag(m0));
   8889 		}
   8890 
   8891 		txs->txs_lastdesc = lasttx;
   8892 
   8893 		DPRINTF(sc, WM_DEBUG_TX,
   8894 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8895 			device_xname(sc->sc_dev),
   8896 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8897 
   8898 		/* Sync the descriptors we're using. */
   8899 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8900 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8901 
   8902 		/* Give the packet to the chip. */
   8903 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8904 
   8905 		DPRINTF(sc, WM_DEBUG_TX,
   8906 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8907 
   8908 		DPRINTF(sc, WM_DEBUG_TX,
   8909 		    ("%s: TX: finished transmitting packet, job %d\n",
   8910 			device_xname(sc->sc_dev), txq->txq_snext));
   8911 
   8912 		/* Advance the tx pointer. */
   8913 		txq->txq_free -= txs->txs_ndesc;
   8914 		txq->txq_next = nexttx;
   8915 
   8916 		txq->txq_sfree--;
   8917 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8918 
   8919 		/* Pass the packet to any BPF listeners. */
   8920 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8921 	}
   8922 
   8923 	if (m0 != NULL) {
   8924 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8925 		WM_Q_EVCNT_INCR(txq, descdrop);
   8926 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8927 			__func__));
   8928 		m_freem(m0);
   8929 	}
   8930 
   8931 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8932 		/* No more slots; notify upper layer. */
   8933 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8934 	}
   8935 
   8936 	if (txq->txq_free != ofree) {
   8937 		/* Set a watchdog timer in case the chip flakes out. */
   8938 		txq->txq_lastsent = time_uptime;
   8939 		txq->txq_sending = true;
   8940 	}
   8941 }
   8942 
   8943 /*
   8944  * wm_nq_tx_offload:
   8945  *
   8946  *	Set up TCP/IP checksumming parameters for the
   8947  *	specified packet, for NEWQUEUE devices
   8948  */
   8949 static void
   8950 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8951     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8952 {
   8953 	struct mbuf *m0 = txs->txs_mbuf;
   8954 	uint32_t vl_len, mssidx, cmdc;
   8955 	struct ether_header *eh;
   8956 	int offset, iphl;
   8957 
   8958 	/*
   8959 	 * XXX It would be nice if the mbuf pkthdr had offset
   8960 	 * fields for the protocol headers.
   8961 	 */
   8962 	*cmdlenp = 0;
   8963 	*fieldsp = 0;
   8964 
   8965 	eh = mtod(m0, struct ether_header *);
   8966 	switch (htons(eh->ether_type)) {
   8967 	case ETHERTYPE_IP:
   8968 	case ETHERTYPE_IPV6:
   8969 		offset = ETHER_HDR_LEN;
   8970 		break;
   8971 
   8972 	case ETHERTYPE_VLAN:
   8973 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8974 		break;
   8975 
   8976 	default:
   8977 		/* Don't support this protocol or encapsulation. */
   8978 		*do_csum = false;
   8979 		return;
   8980 	}
   8981 	*do_csum = true;
   8982 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8983 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8984 
   8985 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8986 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8987 
   8988 	if ((m0->m_pkthdr.csum_flags &
   8989 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8990 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8991 	} else {
   8992 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8993 	}
   8994 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8995 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8996 
   8997 	if (vlan_has_tag(m0)) {
   8998 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8999 		    << NQTXC_VLLEN_VLAN_SHIFT);
   9000 		*cmdlenp |= NQTX_CMD_VLE;
   9001 	}
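         	/*
         	 * At this point vl_len packs the context's MACLEN, IPLEN and
         	 * VLAN fields into one 32-bit word; e.g. (illustrative) an
         	 * untagged IPv4 frame with a 20-byte IP header yields
         	 * MACLEN = 14 and IPLEN = 20.
         	 */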
   9002 
   9003 	mssidx = 0;
   9004 
   9005 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   9006 		int hlen = offset + iphl;
   9007 		int tcp_hlen;
   9008 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   9009 
   9010 		if (__predict_false(m0->m_len <
   9011 				    (hlen + sizeof(struct tcphdr)))) {
   9012 			/*
   9013 			 * TCP/IP headers are not in the first mbuf; we need
   9014 			 * to do this the slow and painful way. Let's just
   9015 			 * hope this doesn't happen very often.
   9016 			 */
   9017 			struct tcphdr th;
   9018 
   9019 			WM_Q_EVCNT_INCR(txq, tsopain);
   9020 
   9021 			m_copydata(m0, hlen, sizeof(th), &th);
   9022 			if (v4) {
   9023 				struct ip ip;
   9024 
   9025 				m_copydata(m0, offset, sizeof(ip), &ip);
   9026 				ip.ip_len = 0;
   9027 				m_copyback(m0,
   9028 				    offset + offsetof(struct ip, ip_len),
   9029 				    sizeof(ip.ip_len), &ip.ip_len);
   9030 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   9031 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   9032 			} else {
   9033 				struct ip6_hdr ip6;
   9034 
   9035 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   9036 				ip6.ip6_plen = 0;
   9037 				m_copyback(m0,
   9038 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   9039 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   9040 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   9041 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   9042 			}
   9043 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   9044 			    sizeof(th.th_sum), &th.th_sum);
   9045 
   9046 			tcp_hlen = th.th_off << 2;
   9047 		} else {
   9048 			/*
   9049 			 * TCP/IP headers are in the first mbuf; we can do
   9050 			 * this the easy way.
   9051 			 */
   9052 			struct tcphdr *th;
   9053 
   9054 			if (v4) {
   9055 				struct ip *ip =
   9056 				    (void *)(mtod(m0, char *) + offset);
   9057 				th = (void *)(mtod(m0, char *) + hlen);
   9058 
   9059 				ip->ip_len = 0;
   9060 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   9061 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   9062 			} else {
   9063 				struct ip6_hdr *ip6 =
   9064 				    (void *)(mtod(m0, char *) + offset);
   9065 				th = (void *)(mtod(m0, char *) + hlen);
   9066 
   9067 				ip6->ip6_plen = 0;
   9068 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   9069 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   9070 			}
   9071 			tcp_hlen = th->th_off << 2;
   9072 		}
   9073 		hlen += tcp_hlen;
   9074 		*cmdlenp |= NQTX_CMD_TSE;
   9075 
   9076 		if (v4) {
   9077 			WM_Q_EVCNT_INCR(txq, tso);
   9078 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   9079 		} else {
   9080 			WM_Q_EVCNT_INCR(txq, tso6);
   9081 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   9082 		}
   9083 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   9084 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9085 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   9086 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   9087 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   9088 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   9089 	} else {
   9090 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   9091 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   9092 	}
   9093 
   9094 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   9095 		*fieldsp |= NQTXD_FIELDS_IXSM;
   9096 		cmdc |= NQTXC_CMD_IP4;
   9097 	}
   9098 
   9099 	if (m0->m_pkthdr.csum_flags &
   9100 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   9101 		WM_Q_EVCNT_INCR(txq, tusum);
   9102 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   9103 			cmdc |= NQTXC_CMD_TCP;
   9104 		else
   9105 			cmdc |= NQTXC_CMD_UDP;
   9106 
   9107 		cmdc |= NQTXC_CMD_IP4;
   9108 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9109 	}
   9110 	if (m0->m_pkthdr.csum_flags &
   9111 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   9112 		WM_Q_EVCNT_INCR(txq, tusum6);
   9113 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   9114 			cmdc |= NQTXC_CMD_TCP;
   9115 		else
   9116 			cmdc |= NQTXC_CMD_UDP;
   9117 
   9118 		cmdc |= NQTXC_CMD_IP6;
   9119 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   9120 	}
   9121 
   9122 	/*
    9123 	 * We don't have to write a context descriptor for every packet on
    9124 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    9125 	 * I210 and I211. It is enough to write one per Tx queue for these
    9126 	 * controllers.
    9127 	 * Writing a context descriptor for every packet would be overhead,
    9128 	 * but it does not cause problems.
   9129 	 */
   9130 	/* Fill in the context descriptor. */
   9131 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
   9132 	    htole32(vl_len);
   9133 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
   9134 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
   9135 	    htole32(cmdc);
   9136 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
   9137 	    htole32(mssidx);
   9138 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   9139 	DPRINTF(sc, WM_DEBUG_TX,
   9140 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   9141 		txq->txq_next, 0, vl_len));
   9142 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   9143 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   9144 	txs->txs_ndesc++;
   9145 }
   9146 
   9147 /*
   9148  * wm_nq_start:		[ifnet interface function]
   9149  *
   9150  *	Start packet transmission on the interface for NEWQUEUE devices
   9151  */
   9152 static void
   9153 wm_nq_start(struct ifnet *ifp)
   9154 {
   9155 	struct wm_softc *sc = ifp->if_softc;
   9156 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9157 
   9158 	KASSERT(if_is_mpsafe(ifp));
   9159 	/*
   9160 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   9161 	 */
   9162 
   9163 	mutex_enter(txq->txq_lock);
   9164 	if (!txq->txq_stopping)
   9165 		wm_nq_start_locked(ifp);
   9166 	mutex_exit(txq->txq_lock);
   9167 }
   9168 
   9169 static void
   9170 wm_nq_start_locked(struct ifnet *ifp)
   9171 {
   9172 	struct wm_softc *sc = ifp->if_softc;
   9173 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   9174 
   9175 	wm_nq_send_common_locked(ifp, txq, false);
   9176 }
   9177 
   9178 static int
   9179 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   9180 {
   9181 	int qid;
   9182 	struct wm_softc *sc = ifp->if_softc;
   9183 	struct wm_txqueue *txq;
   9184 
   9185 	qid = wm_select_txqueue(ifp, m);
   9186 	txq = &sc->sc_queue[qid].wmq_txq;
   9187 
   9188 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   9189 		m_freem(m);
   9190 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   9191 		return ENOBUFS;
   9192 	}
   9193 
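         	/*
         	 * This is the if_transmit path, so the byte and multicast
         	 * counters are updated here rather than in if_transmit()@if.c.
         	 */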
   9194 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   9195 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   9196 	if (m->m_flags & M_MCAST)
   9197 		if_statinc_ref(nsr, if_omcasts);
   9198 	IF_STAT_PUTREF(ifp);
   9199 
    9200 	/*
    9201 	 * There are two situations in which this mutex_tryenter() can
    9202 	 * fail at run time:
    9203 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    9204 	 *     (2) contention with the deferred if_start softint
    9205 	 *         (wm_handle_queue())
    9206 	 * In the case of (1), the last packet enqueued to txq->txq_interq
    9207 	 * is dequeued by wm_deferred_start_locked(), so it does not get
    9208 	 * stuck.  In the case of (2), the last packet is likewise dequeued
    9209 	 * by wm_deferred_start_locked(), so it does not get stuck either.
    9210 	 */
   9211 	if (mutex_tryenter(txq->txq_lock)) {
   9212 		if (!txq->txq_stopping)
   9213 			wm_nq_transmit_locked(ifp, txq);
   9214 		mutex_exit(txq->txq_lock);
   9215 	}
   9216 
   9217 	return 0;
   9218 }
   9219 
   9220 static void
   9221 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9222 {
   9223 
   9224 	wm_nq_send_common_locked(ifp, txq, true);
   9225 }
   9226 
   9227 static void
   9228 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9229     bool is_transmit)
   9230 {
   9231 	struct wm_softc *sc = ifp->if_softc;
   9232 	struct mbuf *m0;
   9233 	struct wm_txsoft *txs;
   9234 	bus_dmamap_t dmamap;
   9235 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9236 	bool do_csum, sent;
   9237 	bool remap = true;
   9238 
   9239 	KASSERT(mutex_owned(txq->txq_lock));
   9240 	KASSERT(!txq->txq_stopping);
   9241 
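         	/* If the ring is marked full, wait for wm_txeof() to clear it. */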
   9242 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9243 		return;
   9244 
   9245 	if (__predict_false(wm_linkdown_discard(txq))) {
   9246 		do {
   9247 			if (is_transmit)
   9248 				m0 = pcq_get(txq->txq_interq);
   9249 			else
   9250 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    9251 			/*
    9252 			 * Count the packet as successfully sent even though
    9253 			 * it is discarded because the PHY link is down.
    9254 			 */
   9255 			if (m0 != NULL) {
   9256 				if_statinc(ifp, if_opackets);
   9257 				m_freem(m0);
   9258 			}
   9259 		} while (m0 != NULL);
   9260 		return;
   9261 	}
   9262 
   9263 	sent = false;
   9264 
   9265 	/*
   9266 	 * Loop through the send queue, setting up transmit descriptors
   9267 	 * until we drain the queue, or use up all available transmit
   9268 	 * descriptors.
   9269 	 */
   9270 	for (;;) {
   9271 		m0 = NULL;
   9272 
   9273 		/* Get a work queue entry. */
   9274 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9275 			wm_txeof(txq, UINT_MAX);
   9276 			if (txq->txq_sfree == 0) {
   9277 				DPRINTF(sc, WM_DEBUG_TX,
   9278 				    ("%s: TX: no free job descriptors\n",
   9279 					device_xname(sc->sc_dev)));
   9280 				WM_Q_EVCNT_INCR(txq, txsstall);
   9281 				break;
   9282 			}
   9283 		}
   9284 
   9285 		/* Grab a packet off the queue. */
   9286 		if (is_transmit)
   9287 			m0 = pcq_get(txq->txq_interq);
   9288 		else
   9289 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9290 		if (m0 == NULL)
   9291 			break;
   9292 
   9293 		DPRINTF(sc, WM_DEBUG_TX,
   9294 		    ("%s: TX: have packet to transmit: %p\n",
   9295 			device_xname(sc->sc_dev), m0));
   9296 
   9297 		txs = &txq->txq_soft[txq->txq_snext];
   9298 		dmamap = txs->txs_dmamap;
   9299 
   9300 		/*
   9301 		 * Load the DMA map.  If this fails, the packet either
   9302 		 * didn't fit in the allotted number of segments, or we
   9303 		 * were short on resources.  For the too-many-segments
   9304 		 * case, we simply report an error and drop the packet,
   9305 		 * since we can't sanely copy a jumbo packet to a single
   9306 		 * buffer.
   9307 		 */
   9308 retry:
   9309 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9310 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9311 		if (__predict_false(error)) {
   9312 			if (error == EFBIG) {
   9313 				if (remap == true) {
   9314 					struct mbuf *m;
   9315 
   9316 					remap = false;
   9317 					m = m_defrag(m0, M_NOWAIT);
   9318 					if (m != NULL) {
   9319 						WM_Q_EVCNT_INCR(txq, defrag);
   9320 						m0 = m;
   9321 						goto retry;
   9322 					}
   9323 				}
   9324 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9325 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9326 				    "DMA segments, dropping...\n",
   9327 				    device_xname(sc->sc_dev));
   9328 				wm_dump_mbuf_chain(sc, m0);
   9329 				m_freem(m0);
   9330 				continue;
   9331 			}
   9332 			/* Short on resources, just stop for now. */
   9333 			DPRINTF(sc, WM_DEBUG_TX,
   9334 			    ("%s: TX: dmamap load failed: %d\n",
   9335 				device_xname(sc->sc_dev), error));
   9336 			break;
   9337 		}
   9338 
   9339 		segs_needed = dmamap->dm_nsegs;
   9340 
   9341 		/*
   9342 		 * Ensure we have enough descriptors free to describe
   9343 		 * the packet. Note, we always reserve one descriptor
   9344 		 * at the end of the ring due to the semantics of the
   9345 		 * TDT register, plus one more in the event we need
   9346 		 * to load offload context.
   9347 		 */
   9348 		if (segs_needed > txq->txq_free - 2) {
   9349 			/*
   9350 			 * Not enough free descriptors to transmit this
   9351 			 * packet.  We haven't committed anything yet,
   9352 			 * so just unload the DMA map, put the packet
    9353 			 * back on the queue, and punt. Notify the upper
   9354 			 * layer that there are no more slots left.
   9355 			 */
   9356 			DPRINTF(sc, WM_DEBUG_TX,
   9357 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9358 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9359 				segs_needed, txq->txq_free - 1));
   9360 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9361 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9362 			WM_Q_EVCNT_INCR(txq, txdstall);
   9363 			break;
   9364 		}
   9365 
   9366 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9367 
   9368 		DPRINTF(sc, WM_DEBUG_TX,
   9369 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9370 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9371 
   9372 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9373 
   9374 		/*
   9375 		 * Store a pointer to the packet so that we can free it
   9376 		 * later.
   9377 		 *
    9378 		 * Initially, we assume the packet uses one descriptor per
    9379 		 * DMA segment.  This may be
   9380 		 * incremented by 1 if we do checksum offload (a descriptor
   9381 		 * is used to set the checksum context).
   9382 		 */
   9383 		txs->txs_mbuf = m0;
   9384 		txs->txs_firstdesc = txq->txq_next;
   9385 		txs->txs_ndesc = segs_needed;
   9386 
   9387 		/* Set up offload parameters for this packet. */
   9388 		uint32_t cmdlen, fields, dcmdlen;
   9389 		if (m0->m_pkthdr.csum_flags &
   9390 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9391 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9392 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9393 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9394 			    &do_csum);
   9395 		} else {
   9396 			do_csum = false;
   9397 			cmdlen = 0;
   9398 			fields = 0;
   9399 		}
   9400 
   9401 		/* Sync the DMA map. */
   9402 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9403 		    BUS_DMASYNC_PREWRITE);
   9404 
   9405 		/* Initialize the first transmit descriptor. */
   9406 		nexttx = txq->txq_next;
   9407 		if (!do_csum) {
   9408 			/* Set up a legacy descriptor */
   9409 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9410 			    dmamap->dm_segs[0].ds_addr);
   9411 			txq->txq_descs[nexttx].wtx_cmdlen =
   9412 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9413 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9414 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9415 			if (vlan_has_tag(m0)) {
   9416 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9417 				    htole32(WTX_CMD_VLE);
   9418 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9419 				    htole16(vlan_get_tag(m0));
   9420 			} else
    9421 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9422 
   9423 			dcmdlen = 0;
   9424 		} else {
   9425 			/* Set up an advanced data descriptor */
   9426 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9427 			    htole64(dmamap->dm_segs[0].ds_addr);
   9428 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   9429 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9430 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9431 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9432 			    htole32(fields);
   9433 			DPRINTF(sc, WM_DEBUG_TX,
   9434 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9435 				device_xname(sc->sc_dev), nexttx,
   9436 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9437 			DPRINTF(sc, WM_DEBUG_TX,
   9438 			    ("\t 0x%08x%08x\n", fields,
   9439 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9440 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9441 		}
   9442 
   9443 		lasttx = nexttx;
   9444 		nexttx = WM_NEXTTX(txq, nexttx);
   9445 		/*
   9446 		 * Fill in the next descriptors. Legacy or advanced format
   9447 		 * is the same here.
   9448 		 */
   9449 		for (seg = 1; seg < dmamap->dm_nsegs;
   9450 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9451 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9452 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9453 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9454 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9455 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9456 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9457 			lasttx = nexttx;
   9458 
   9459 			DPRINTF(sc, WM_DEBUG_TX,
   9460 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9461 				device_xname(sc->sc_dev), nexttx,
   9462 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9463 				dmamap->dm_segs[seg].ds_len));
   9464 		}
   9465 
   9466 		KASSERT(lasttx != -1);
   9467 
   9468 		/*
   9469 		 * Set up the command byte on the last descriptor of
   9470 		 * the packet. If we're in the interrupt delay window,
   9471 		 * delay the interrupt.
   9472 		 */
   9473 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9474 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   9475 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9476 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9477 
   9478 		txs->txs_lastdesc = lasttx;
   9479 
   9480 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9481 		    device_xname(sc->sc_dev),
   9482 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9483 
   9484 		/* Sync the descriptors we're using. */
   9485 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9486 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9487 
   9488 		/* Give the packet to the chip. */
   9489 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9490 		sent = true;
   9491 
   9492 		DPRINTF(sc, WM_DEBUG_TX,
   9493 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9494 
   9495 		DPRINTF(sc, WM_DEBUG_TX,
   9496 		    ("%s: TX: finished transmitting packet, job %d\n",
   9497 			device_xname(sc->sc_dev), txq->txq_snext));
   9498 
   9499 		/* Advance the tx pointer. */
   9500 		txq->txq_free -= txs->txs_ndesc;
   9501 		txq->txq_next = nexttx;
   9502 
   9503 		txq->txq_sfree--;
   9504 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9505 
   9506 		/* Pass the packet to any BPF listeners. */
   9507 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9508 	}
   9509 
   9510 	if (m0 != NULL) {
   9511 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9512 		WM_Q_EVCNT_INCR(txq, descdrop);
   9513 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9514 			__func__));
   9515 		m_freem(m0);
   9516 	}
   9517 
   9518 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9519 		/* No more slots; notify upper layer. */
   9520 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9521 	}
   9522 
   9523 	if (sent) {
   9524 		/* Set a watchdog timer in case the chip flakes out. */
   9525 		txq->txq_lastsent = time_uptime;
   9526 		txq->txq_sending = true;
   9527 	}
   9528 }
   9529 
   9530 static void
   9531 wm_deferred_start_locked(struct wm_txqueue *txq)
   9532 {
   9533 	struct wm_softc *sc = txq->txq_sc;
   9534 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9535 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9536 	int qid = wmq->wmq_id;
   9537 
   9538 	KASSERT(mutex_owned(txq->txq_lock));
   9539 	KASSERT(!txq->txq_stopping);
   9540 
   9541 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9542 		/* XXX needed for ALTQ or single-CPU systems */
   9543 		if (qid == 0)
   9544 			wm_nq_start_locked(ifp);
   9545 		wm_nq_transmit_locked(ifp, txq);
   9546 	} else {
    9547 		/* XXX needed for ALTQ or single-CPU systems */
   9548 		if (qid == 0)
   9549 			wm_start_locked(ifp);
   9550 		wm_transmit_locked(ifp, txq);
   9551 	}
   9552 }
   9553 
   9554 /* Interrupt */
   9555 
   9556 /*
   9557  * wm_txeof:
   9558  *
   9559  *	Helper; handle transmit interrupts.
   9560  */
   9561 static bool
   9562 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9563 {
   9564 	struct wm_softc *sc = txq->txq_sc;
   9565 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9566 	struct wm_txsoft *txs;
   9567 	int count = 0;
   9568 	int i;
   9569 	uint8_t status;
   9570 	bool more = false;
   9571 
   9572 	KASSERT(mutex_owned(txq->txq_lock));
   9573 
   9574 	if (txq->txq_stopping)
   9575 		return false;
   9576 
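         	/* We are about to reclaim descriptors, so clear the no-space flag. */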
   9577 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9578 
   9579 	/*
   9580 	 * Go through the Tx list and free mbufs for those
   9581 	 * frames which have been transmitted.
   9582 	 */
   9583 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9584 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9585 		txs = &txq->txq_soft[i];
   9586 
   9587 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9588 			device_xname(sc->sc_dev), i));
   9589 
   9590 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9591 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9592 
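         		/*
         		 * If the descriptor done (DD) bit is clear, the hardware
         		 * still owns this job; resync the descriptor and stop.
         		 */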
   9593 		status =
   9594 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   9595 		if ((status & WTX_ST_DD) == 0) {
   9596 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9597 			    BUS_DMASYNC_PREREAD);
   9598 			break;
   9599 		}
   9600 
   9601 		if (limit-- == 0) {
   9602 			more = true;
   9603 			DPRINTF(sc, WM_DEBUG_TX,
   9604 			    ("%s: TX: loop limited, job %d is not processed\n",
   9605 				device_xname(sc->sc_dev), i));
   9606 			break;
   9607 		}
   9608 
   9609 		count++;
   9610 		DPRINTF(sc, WM_DEBUG_TX,
   9611 		    ("%s: TX: job %d done: descs %d..%d\n",
   9612 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9613 		    txs->txs_lastdesc));
   9614 
   9615 #ifdef WM_EVENT_COUNTERS
   9616 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
   9617 			WM_Q_EVCNT_INCR(txq, underrun);
   9618 #endif /* WM_EVENT_COUNTERS */
   9619 
    9620 		/*
    9621 		 * The documentation for the 82574 and newer devices says the
    9622 		 * status field has neither the EC (Excessive Collision) bit
    9623 		 * nor the LC (Late Collision) bit (both are reserved).  See
    9624 		 * the "PCIe GbE Controller Open Source Software Developer's
    9625 		 * Manual", the 82574 datasheet, and newer ones.
    9626 		 *
    9627 		 * XXX The LC bit has been seen set on an I218 even though the
    9628 		 * media was full duplex; the bit may have another meaning.
    9629 		 */
   9630 
   9631 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9632 		    && ((sc->sc_type < WM_T_82574)
   9633 			|| (sc->sc_type == WM_T_80003))) {
   9634 			if_statinc(ifp, if_oerrors);
   9635 			if (status & WTX_ST_LC)
   9636 				log(LOG_WARNING, "%s: late collision\n",
   9637 				    device_xname(sc->sc_dev));
   9638 			else if (status & WTX_ST_EC) {
   9639 				if_statadd(ifp, if_collisions,
   9640 				    TX_COLLISION_THRESHOLD + 1);
   9641 				log(LOG_WARNING, "%s: excessive collisions\n",
   9642 				    device_xname(sc->sc_dev));
   9643 			}
   9644 		} else
   9645 			if_statinc(ifp, if_opackets);
   9646 
   9647 		txq->txq_packets++;
   9648 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9649 
   9650 		txq->txq_free += txs->txs_ndesc;
   9651 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9652 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9653 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9654 		m_freem(txs->txs_mbuf);
   9655 		txs->txs_mbuf = NULL;
   9656 	}
   9657 
   9658 	/* Update the dirty transmit buffer pointer. */
   9659 	txq->txq_sdirty = i;
   9660 	DPRINTF(sc, WM_DEBUG_TX,
   9661 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9662 
   9663 	if (count != 0)
   9664 		rnd_add_uint32(&sc->rnd_source, count);
   9665 
   9666 	/*
   9667 	 * If there are no more pending transmissions, cancel the watchdog
   9668 	 * timer.
   9669 	 */
   9670 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9671 		txq->txq_sending = false;
   9672 
   9673 	return more;
   9674 }
   9675 
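         /*
          * Receive descriptor accessors.  The 82574 uses extended Rx
          * descriptors, NEWQUEUE (82575 and newer) devices use advanced Rx
          * descriptors, and all other devices use the legacy layout; these
          * helpers hide the differences between the three formats.
          */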
   9676 static inline uint32_t
   9677 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9678 {
   9679 	struct wm_softc *sc = rxq->rxq_sc;
   9680 
   9681 	if (sc->sc_type == WM_T_82574)
   9682 		return EXTRXC_STATUS(
   9683 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9684 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9685 		return NQRXC_STATUS(
   9686 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9687 	else
   9688 		return rxq->rxq_descs[idx].wrx_status;
   9689 }
   9690 
   9691 static inline uint32_t
   9692 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9693 {
   9694 	struct wm_softc *sc = rxq->rxq_sc;
   9695 
   9696 	if (sc->sc_type == WM_T_82574)
   9697 		return EXTRXC_ERROR(
   9698 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9699 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9700 		return NQRXC_ERROR(
   9701 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9702 	else
   9703 		return rxq->rxq_descs[idx].wrx_errors;
   9704 }
   9705 
   9706 static inline uint16_t
   9707 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9708 {
   9709 	struct wm_softc *sc = rxq->rxq_sc;
   9710 
   9711 	if (sc->sc_type == WM_T_82574)
   9712 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9713 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9714 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9715 	else
   9716 		return rxq->rxq_descs[idx].wrx_special;
   9717 }
   9718 
   9719 static inline int
   9720 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9721 {
   9722 	struct wm_softc *sc = rxq->rxq_sc;
   9723 
   9724 	if (sc->sc_type == WM_T_82574)
   9725 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9726 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9727 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9728 	else
   9729 		return rxq->rxq_descs[idx].wrx_len;
   9730 }
   9731 
   9732 #ifdef WM_DEBUG
   9733 static inline uint32_t
   9734 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9735 {
   9736 	struct wm_softc *sc = rxq->rxq_sc;
   9737 
   9738 	if (sc->sc_type == WM_T_82574)
   9739 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9740 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9741 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9742 	else
   9743 		return 0;
   9744 }
   9745 
   9746 static inline uint8_t
   9747 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9748 {
   9749 	struct wm_softc *sc = rxq->rxq_sc;
   9750 
   9751 	if (sc->sc_type == WM_T_82574)
   9752 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9753 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9754 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9755 	else
   9756 		return 0;
   9757 }
   9758 #endif /* WM_DEBUG */
   9759 
   9760 static inline bool
   9761 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9762     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9763 {
   9764 
   9765 	if (sc->sc_type == WM_T_82574)
   9766 		return (status & ext_bit) != 0;
   9767 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9768 		return (status & nq_bit) != 0;
   9769 	else
   9770 		return (status & legacy_bit) != 0;
   9771 }
   9772 
   9773 static inline bool
   9774 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9775     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9776 {
   9777 
   9778 	if (sc->sc_type == WM_T_82574)
   9779 		return (error & ext_bit) != 0;
   9780 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9781 		return (error & nq_bit) != 0;
   9782 	else
   9783 		return (error & legacy_bit) != 0;
   9784 }
   9785 
   9786 static inline bool
   9787 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9788 {
   9789 
   9790 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9791 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9792 		return true;
   9793 	else
   9794 		return false;
   9795 }
   9796 
   9797 static inline bool
   9798 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9799 {
   9800 	struct wm_softc *sc = rxq->rxq_sc;
   9801 
   9802 	/* XXX missing error bit for newqueue? */
   9803 	if (wm_rxdesc_is_set_error(sc, errors,
   9804 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9805 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9806 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9807 		NQRXC_ERROR_RXE)) {
   9808 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9809 		    EXTRXC_ERROR_SE, 0))
   9810 			log(LOG_WARNING, "%s: symbol error\n",
   9811 			    device_xname(sc->sc_dev));
   9812 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9813 		    EXTRXC_ERROR_SEQ, 0))
   9814 			log(LOG_WARNING, "%s: receive sequence error\n",
   9815 			    device_xname(sc->sc_dev));
   9816 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9817 		    EXTRXC_ERROR_CE, 0))
   9818 			log(LOG_WARNING, "%s: CRC error\n",
   9819 			    device_xname(sc->sc_dev));
   9820 		return true;
   9821 	}
   9822 
   9823 	return false;
   9824 }
   9825 
   9826 static inline bool
   9827 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9828 {
   9829 	struct wm_softc *sc = rxq->rxq_sc;
   9830 
   9831 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9832 		NQRXC_STATUS_DD)) {
   9833 		/* We have processed all of the receive descriptors. */
   9834 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9835 		return false;
   9836 	}
   9837 
   9838 	return true;
   9839 }
   9840 
   9841 static inline bool
   9842 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9843     uint16_t vlantag, struct mbuf *m)
   9844 {
   9845 
   9846 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9847 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9848 		vlan_set_tag(m, le16toh(vlantag));
   9849 	}
   9850 
   9851 	return true;
   9852 }
   9853 
   9854 static inline void
   9855 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9856     uint32_t errors, struct mbuf *m)
   9857 {
   9858 	struct wm_softc *sc = rxq->rxq_sc;
   9859 
   9860 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9861 		if (wm_rxdesc_is_set_status(sc, status,
   9862 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9863 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9864 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9865 			if (wm_rxdesc_is_set_error(sc, errors,
   9866 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9867 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9868 		}
   9869 		if (wm_rxdesc_is_set_status(sc, status,
   9870 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9871 			/*
   9872 			 * Note: we don't know if this was TCP or UDP,
   9873 			 * so we just set both bits, and expect the
   9874 			 * upper layers to deal.
   9875 			 */
   9876 			WM_Q_EVCNT_INCR(rxq, tusum);
   9877 			m->m_pkthdr.csum_flags |=
   9878 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9879 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9880 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9881 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9882 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9883 		}
   9884 	}
   9885 }
   9886 
   9887 /*
   9888  * wm_rxeof:
   9889  *
   9890  *	Helper; handle receive interrupts.
   9891  */
   9892 static bool
   9893 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9894 {
   9895 	struct wm_softc *sc = rxq->rxq_sc;
   9896 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9897 	struct wm_rxsoft *rxs;
   9898 	struct mbuf *m;
   9899 	int i, len;
   9900 	int count = 0;
   9901 	uint32_t status, errors;
   9902 	uint16_t vlantag;
   9903 	bool more = false;
   9904 
   9905 	KASSERT(mutex_owned(rxq->rxq_lock));
   9906 
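         	/*
         	 * Scan the ring from the last processed descriptor until we
         	 * reach one the hardware still owns or we hit the limit.
         	 */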
   9907 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9908 		rxs = &rxq->rxq_soft[i];
   9909 
   9910 		DPRINTF(sc, WM_DEBUG_RX,
   9911 		    ("%s: RX: checking descriptor %d\n",
   9912 			device_xname(sc->sc_dev), i));
   9913 		wm_cdrxsync(rxq, i,
   9914 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9915 
   9916 		status = wm_rxdesc_get_status(rxq, i);
   9917 		errors = wm_rxdesc_get_errors(rxq, i);
   9918 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9919 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9920 #ifdef WM_DEBUG
   9921 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9922 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9923 #endif
   9924 
   9925 		if (!wm_rxdesc_dd(rxq, i, status))
   9926 			break;
   9927 
   9928 		if (limit-- == 0) {
   9929 			more = true;
   9930 			DPRINTF(sc, WM_DEBUG_RX,
   9931 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9932 				device_xname(sc->sc_dev), i));
   9933 			break;
   9934 		}
   9935 
   9936 		count++;
   9937 		if (__predict_false(rxq->rxq_discard)) {
   9938 			DPRINTF(sc, WM_DEBUG_RX,
   9939 			    ("%s: RX: discarding contents of descriptor %d\n",
   9940 				device_xname(sc->sc_dev), i));
   9941 			wm_init_rxdesc(rxq, i);
   9942 			if (wm_rxdesc_is_eop(rxq, status)) {
   9943 				/* Reset our state. */
   9944 				DPRINTF(sc, WM_DEBUG_RX,
   9945 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9946 					device_xname(sc->sc_dev)));
   9947 				rxq->rxq_discard = 0;
   9948 			}
   9949 			continue;
   9950 		}
   9951 
   9952 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9953 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9954 
   9955 		m = rxs->rxs_mbuf;
   9956 
   9957 		/*
   9958 		 * Add a new receive buffer to the ring, unless of
   9959 		 * course the length is zero. Treat the latter as a
   9960 		 * failed mapping.
   9961 		 */
   9962 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9963 			/*
   9964 			 * Failed, throw away what we've done so
   9965 			 * far, and discard the rest of the packet.
   9966 			 */
   9967 			if_statinc(ifp, if_ierrors);
   9968 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9969 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9970 			wm_init_rxdesc(rxq, i);
   9971 			if (!wm_rxdesc_is_eop(rxq, status))
   9972 				rxq->rxq_discard = 1;
   9973 			if (rxq->rxq_head != NULL)
   9974 				m_freem(rxq->rxq_head);
   9975 			WM_RXCHAIN_RESET(rxq);
   9976 			DPRINTF(sc, WM_DEBUG_RX,
   9977 			    ("%s: RX: Rx buffer allocation failed, "
   9978 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9979 				rxq->rxq_discard ? " (discard)" : ""));
   9980 			continue;
   9981 		}
   9982 
   9983 		m->m_len = len;
   9984 		rxq->rxq_len += len;
   9985 		DPRINTF(sc, WM_DEBUG_RX,
   9986 		    ("%s: RX: buffer at %p len %d\n",
   9987 			device_xname(sc->sc_dev), m->m_data, len));
   9988 
   9989 		/* If this is not the end of the packet, keep looking. */
   9990 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9991 			WM_RXCHAIN_LINK(rxq, m);
   9992 			DPRINTF(sc, WM_DEBUG_RX,
   9993 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9994 				device_xname(sc->sc_dev), rxq->rxq_len));
   9995 			continue;
   9996 		}
   9997 
    9998 		/*
    9999 		 * Okay, we have the entire packet now.  The chip is
   10000 		 * configured to include the FCS (not all chips can be
   10001 		 * configured to strip it), so we normally have to trim it,
   10002 		 * except on the I35[04] and I21[01].  Those chips have an
   10003 		 * erratum: the RCTL_SECRC bit is always set, so the FCS is
   10004 		 * already stripped.  PCH2 and newer chips also don't include
   10005 		 * the FCS with jumbo frames, as an erratum workaround.  We
   10006 		 * may need to adjust the length of the previous mbuf in the
   10007 		 * chain if the current mbuf is too short.
   10008 		 */
   10009 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   10010 			if (m->m_len < ETHER_CRC_LEN) {
   10011 				rxq->rxq_tail->m_len
   10012 				    -= (ETHER_CRC_LEN - m->m_len);
   10013 				m->m_len = 0;
   10014 			} else
   10015 				m->m_len -= ETHER_CRC_LEN;
   10016 			len = rxq->rxq_len - ETHER_CRC_LEN;
   10017 		} else
   10018 			len = rxq->rxq_len;
   10019 
   10020 		WM_RXCHAIN_LINK(rxq, m);
   10021 
   10022 		*rxq->rxq_tailp = NULL;
   10023 		m = rxq->rxq_head;
   10024 
   10025 		WM_RXCHAIN_RESET(rxq);
   10026 
   10027 		DPRINTF(sc, WM_DEBUG_RX,
   10028 		    ("%s: RX: have entire packet, len -> %d\n",
   10029 			device_xname(sc->sc_dev), len));
   10030 
   10031 		/* If an error occurred, update stats and drop the packet. */
   10032 		if (wm_rxdesc_has_errors(rxq, errors)) {
   10033 			m_freem(m);
   10034 			continue;
   10035 		}
   10036 
   10037 		/* No errors.  Receive the packet. */
   10038 		m_set_rcvif(m, ifp);
   10039 		m->m_pkthdr.len = len;
   10040 		/*
   10041 		 * TODO
   10042 		 * The rsshash and rsstype should be saved in this mbuf.
   10043 		 */
   10044 		DPRINTF(sc, WM_DEBUG_RX,
   10045 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   10046 			device_xname(sc->sc_dev), rsstype, rsshash));
   10047 
   10048 		/*
   10049 		 * If VLANs are enabled, VLAN packets have been unwrapped
   10050 		 * for us.  Associate the tag with the packet.
   10051 		 */
   10052 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   10053 			continue;
   10054 
   10055 		/* Set up checksum info for this packet. */
   10056 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   10057 
   10058 		rxq->rxq_packets++;
   10059 		rxq->rxq_bytes += len;
   10060 		/* Pass it on. */
   10061 		if_percpuq_enqueue(sc->sc_ipq, m);
   10062 
   10063 		if (rxq->rxq_stopping)
   10064 			break;
   10065 	}
   10066 	rxq->rxq_ptr = i;
   10067 
   10068 	if (count != 0)
   10069 		rnd_add_uint32(&sc->rnd_source, count);
   10070 
   10071 	DPRINTF(sc, WM_DEBUG_RX,
   10072 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   10073 
   10074 	return more;
   10075 }
   10076 
   10077 /*
   10078  * wm_linkintr_gmii:
   10079  *
   10080  *	Helper; handle link interrupts for GMII.
   10081  */
   10082 static void
   10083 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   10084 {
   10085 	device_t dev = sc->sc_dev;
   10086 	uint32_t status, reg;
   10087 	bool link;
   10088 	int rv;
   10089 
   10090 	KASSERT(mutex_owned(sc->sc_core_lock));
   10091 
   10092 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   10093 		__func__));
   10094 
   10095 	if ((icr & ICR_LSC) == 0) {
   10096 		if (icr & ICR_RXSEQ)
   10097 			DPRINTF(sc, WM_DEBUG_LINK,
   10098 			    ("%s: LINK Receive sequence error\n",
   10099 				device_xname(dev)));
   10100 		return;
   10101 	}
   10102 
   10103 	/* Link status changed */
   10104 	status = CSR_READ(sc, WMREG_STATUS);
   10105 	link = status & STATUS_LU;
   10106 	if (link) {
   10107 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10108 			device_xname(dev),
   10109 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10110 		if (wm_phy_need_linkdown_discard(sc)) {
   10111 			DPRINTF(sc, WM_DEBUG_LINK,
   10112 			    ("%s: linkintr: Clear linkdown discard flag\n",
   10113 				device_xname(dev)));
   10114 			wm_clear_linkdown_discard(sc);
   10115 		}
   10116 	} else {
   10117 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10118 			device_xname(dev)));
   10119 		if (wm_phy_need_linkdown_discard(sc)) {
   10120 			DPRINTF(sc, WM_DEBUG_LINK,
   10121 			    ("%s: linkintr: Set linkdown discard flag\n",
   10122 				device_xname(dev)));
   10123 			wm_set_linkdown_discard(sc);
   10124 		}
   10125 	}
   10126 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   10127 		wm_gig_downshift_workaround_ich8lan(sc);
   10128 
   10129 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   10130 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   10131 
   10132 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   10133 		device_xname(dev)));
   10134 	mii_pollstat(&sc->sc_mii);
   10135 	if (sc->sc_type == WM_T_82543) {
   10136 		int miistatus, active;
   10137 
   10138 		/*
   10139 		 * With 82543, we need to force speed and
   10140 		 * duplex on the MAC equal to what the PHY
   10141 		 * speed and duplex configuration is.
   10142 		 */
   10143 		miistatus = sc->sc_mii.mii_media_status;
   10144 
   10145 		if (miistatus & IFM_ACTIVE) {
   10146 			active = sc->sc_mii.mii_media_active;
   10147 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10148 			switch (IFM_SUBTYPE(active)) {
   10149 			case IFM_10_T:
   10150 				sc->sc_ctrl |= CTRL_SPEED_10;
   10151 				break;
   10152 			case IFM_100_TX:
   10153 				sc->sc_ctrl |= CTRL_SPEED_100;
   10154 				break;
   10155 			case IFM_1000_T:
   10156 				sc->sc_ctrl |= CTRL_SPEED_1000;
   10157 				break;
   10158 			default:
   10159 				/*
   10160 				 * Fiber?
   10161 				 * Should not get here.
   10162 				 */
   10163 				device_printf(dev, "unknown media (%x)\n",
   10164 				    active);
   10165 				break;
   10166 			}
   10167 			if (active & IFM_FDX)
   10168 				sc->sc_ctrl |= CTRL_FD;
   10169 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10170 		}
   10171 	} else if (sc->sc_type == WM_T_PCH) {
   10172 		wm_k1_gig_workaround_hv(sc,
   10173 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10174 	}
   10175 
   10176 	/*
   10177 	 * When connected at 10Mbps half-duplex, some parts are excessively
   10178 	 * aggressive resulting in many collisions. To avoid this, increase
   10179 	 * the IPG and reduce Rx latency in the PHY.
   10180 	 */
   10181 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   10182 	    && link) {
   10183 		uint32_t tipg_reg;
   10184 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   10185 		bool fdx;
   10186 		uint16_t emi_addr, emi_val;
   10187 
   10188 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   10189 		tipg_reg &= ~TIPG_IPGT_MASK;
   10190 		fdx = status & STATUS_FD;
   10191 
   10192 		if (!fdx && (speed == STATUS_SPEED_10)) {
   10193 			tipg_reg |= 0xff;
   10194 			/* Reduce Rx latency in analog PHY */
   10195 			emi_val = 0;
   10196 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   10197 		    fdx && speed != STATUS_SPEED_1000) {
   10198 			tipg_reg |= 0xc;
   10199 			emi_val = 1;
   10200 		} else {
   10201 			/* Roll back the default values */
   10202 			tipg_reg |= 0x08;
   10203 			emi_val = 1;
   10204 		}
   10205 
   10206 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   10207 
   10208 		rv = sc->phy.acquire(sc);
   10209 		if (rv)
   10210 			return;
   10211 
   10212 		if (sc->sc_type == WM_T_PCH2)
   10213 			emi_addr = I82579_RX_CONFIG;
   10214 		else
   10215 			emi_addr = I217_RX_CONFIG;
   10216 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10217 
   10218 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10219 			uint16_t phy_reg;
   10220 
   10221 			sc->phy.readreg_locked(dev, 2,
   10222 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10223 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10224 			if (speed == STATUS_SPEED_100
   10225 			    || speed == STATUS_SPEED_10)
   10226 				phy_reg |= 0x3e8;
   10227 			else
   10228 				phy_reg |= 0xfa;
   10229 			sc->phy.writereg_locked(dev, 2,
   10230 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10231 
   10232 			if (speed == STATUS_SPEED_1000) {
   10233 				sc->phy.readreg_locked(dev, 2,
   10234 				    HV_PM_CTRL, &phy_reg);
   10235 
   10236 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10237 
   10238 				sc->phy.writereg_locked(dev, 2,
   10239 				    HV_PM_CTRL, phy_reg);
   10240 			}
   10241 		}
   10242 		sc->phy.release(sc);
   10243 
   10244 		if (rv)
   10245 			return;
   10246 
   10247 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10248 			uint16_t data, ptr_gap;
   10249 
   10250 			if (speed == STATUS_SPEED_1000) {
   10251 				rv = sc->phy.acquire(sc);
   10252 				if (rv)
   10253 					return;
   10254 
   10255 				rv = sc->phy.readreg_locked(dev, 2,
   10256 				    I82579_UNKNOWN1, &data);
   10257 				if (rv) {
   10258 					sc->phy.release(sc);
   10259 					return;
   10260 				}
   10261 
   10262 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10263 				if (ptr_gap < 0x18) {
   10264 					data &= ~(0x3ff << 2);
   10265 					data |= (0x18 << 2);
   10266 					rv = sc->phy.writereg_locked(dev,
   10267 					    2, I82579_UNKNOWN1, data);
   10268 				}
   10269 				sc->phy.release(sc);
   10270 				if (rv)
   10271 					return;
   10272 			} else {
   10273 				rv = sc->phy.acquire(sc);
   10274 				if (rv)
   10275 					return;
   10276 
   10277 				rv = sc->phy.writereg_locked(dev, 2,
   10278 				    I82579_UNKNOWN1, 0xc023);
   10279 				sc->phy.release(sc);
   10280 				if (rv)
   10281 					return;
   10282 
   10283 			}
   10284 		}
   10285 	}
   10286 
   10287 	/*
   10288 	 * I217 Packet Loss issue:
   10289 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   10290 	 * on power up.
   10291 	 * Set the Beacon Duration for I217 to 8 usec
   10292 	 */
   10293 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10294 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10295 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10296 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10297 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10298 	}
   10299 
   10300 	/* Work-around I218 hang issue */
   10301 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10302 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10303 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10304 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10305 		wm_k1_workaround_lpt_lp(sc, link);
   10306 
   10307 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10308 		/*
   10309 		 * Set platform power management values for Latency
   10310 		 * Tolerance Reporting (LTR)
   10311 		 */
   10312 		wm_platform_pm_pch_lpt(sc,
   10313 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10314 	}
   10315 
   10316 	/* Clear link partner's EEE ability */
   10317 	sc->eee_lp_ability = 0;
   10318 
   10319 	/* FEXTNVM6 K1-off workaround */
   10320 	if (sc->sc_type == WM_T_PCH_SPT) {
   10321 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10322 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10323 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10324 		else
   10325 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10326 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10327 	}
   10328 
   10329 	if (!link)
   10330 		return;
   10331 
   10332 	switch (sc->sc_type) {
   10333 	case WM_T_PCH2:
   10334 		wm_k1_workaround_lv(sc);
   10335 		/* FALLTHROUGH */
   10336 	case WM_T_PCH:
   10337 		if (sc->sc_phytype == WMPHY_82578)
   10338 			wm_link_stall_workaround_hv(sc);
   10339 		break;
   10340 	default:
   10341 		break;
   10342 	}
   10343 
   10344 	/* Enable/Disable EEE after link up */
   10345 	if (sc->sc_phytype > WMPHY_82579)
   10346 		wm_set_eee_pchlan(sc);
   10347 }
   10348 
   10349 /*
   10350  * wm_linkintr_tbi:
   10351  *
   10352  *	Helper; handle link interrupts for TBI mode.
   10353  */
   10354 static void
   10355 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10356 {
   10357 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10358 	uint32_t status;
   10359 
   10360 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10361 		__func__));
   10362 
   10363 	status = CSR_READ(sc, WMREG_STATUS);
   10364 	if (icr & ICR_LSC) {
   10365 		wm_check_for_link(sc);
   10366 		if (status & STATUS_LU) {
   10367 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10368 				device_xname(sc->sc_dev),
   10369 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10370 			/*
   10371 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   10372 			 * automatically, so refresh our cached sc->sc_ctrl.
   10373 			 */
   10374 
   10375 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10376 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10377 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10378 			if (status & STATUS_FD)
   10379 				sc->sc_tctl |=
   10380 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10381 			else
   10382 				sc->sc_tctl |=
   10383 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10384 			if (sc->sc_ctrl & CTRL_TFCE)
   10385 				sc->sc_fcrtl |= FCRTL_XONE;
   10386 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10387 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10388 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10389 			sc->sc_tbi_linkup = 1;
   10390 			if_link_state_change(ifp, LINK_STATE_UP);
   10391 		} else {
   10392 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10393 				device_xname(sc->sc_dev)));
   10394 			sc->sc_tbi_linkup = 0;
   10395 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10396 		}
   10397 		/* Update LED */
   10398 		wm_tbi_serdes_set_linkled(sc);
   10399 	} else if (icr & ICR_RXSEQ)
   10400 		DPRINTF(sc, WM_DEBUG_LINK,
   10401 		    ("%s: LINK: Receive sequence error\n",
   10402 			device_xname(sc->sc_dev)));
   10403 }
   10404 
   10405 /*
   10406  * wm_linkintr_serdes:
   10407  *
   10408  *	Helper; handle link interrupts for SERDES mode.
   10409  */
   10410 static void
   10411 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10412 {
   10413 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10414 	struct mii_data *mii = &sc->sc_mii;
   10415 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10416 	uint32_t pcs_adv, pcs_lpab, reg;
   10417 
   10418 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10419 		__func__));
   10420 
   10421 	if (icr & ICR_LSC) {
   10422 		/* Check PCS */
   10423 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10424 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10425 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10426 				device_xname(sc->sc_dev)));
   10427 			mii->mii_media_status |= IFM_ACTIVE;
   10428 			sc->sc_tbi_linkup = 1;
   10429 			if_link_state_change(ifp, LINK_STATE_UP);
   10430 		} else {
   10431 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10432 				device_xname(sc->sc_dev)));
   10433 			mii->mii_media_status |= IFM_NONE;
   10434 			sc->sc_tbi_linkup = 0;
   10435 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10436 			wm_tbi_serdes_set_linkled(sc);
   10437 			return;
   10438 		}
   10439 		mii->mii_media_active |= IFM_1000_SX;
   10440 		if ((reg & PCS_LSTS_FDX) != 0)
   10441 			mii->mii_media_active |= IFM_FDX;
   10442 		else
   10443 			mii->mii_media_active |= IFM_HDX;
   10444 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10445 			/* Check flow */
   10446 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10447 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10448 				DPRINTF(sc, WM_DEBUG_LINK,
   10449 				    ("XXX LINKOK but not ACOMP\n"));
   10450 				return;
   10451 			}
   10452 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10453 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10454 			DPRINTF(sc, WM_DEBUG_LINK,
   10455 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10456 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10457 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10458 				mii->mii_media_active |= IFM_FLOW
   10459 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10460 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10461 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10462 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10463 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10464 				mii->mii_media_active |= IFM_FLOW
   10465 				    | IFM_ETH_TXPAUSE;
   10466 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10467 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10468 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10469 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10470 				mii->mii_media_active |= IFM_FLOW
   10471 				    | IFM_ETH_RXPAUSE;
   10472 		}
   10473 		/* Update LED */
   10474 		wm_tbi_serdes_set_linkled(sc);
   10475 	} else
   10476 		DPRINTF(sc, WM_DEBUG_LINK,
   10477 		    ("%s: LINK: Receive sequence error\n",
   10478 		    device_xname(sc->sc_dev)));
   10479 }
   10480 
   10481 /*
   10482  * wm_linkintr:
   10483  *
   10484  *	Helper; handle link interrupts.
   10485  */
   10486 static void
   10487 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10488 {
   10489 
   10490 	KASSERT(mutex_owned(sc->sc_core_lock));
   10491 
   10492 	if (sc->sc_flags & WM_F_HAS_MII)
   10493 		wm_linkintr_gmii(sc, icr);
   10494 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10495 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10496 		wm_linkintr_serdes(sc, icr);
   10497 	else
   10498 		wm_linkintr_tbi(sc, icr);
   10499 }
   10500 
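          /*
           * wm_sched_handle_queue:
           *
           *	Defer Tx/Rx processing for a queue to either its workqueue
           *	or its softint, depending on wmq_txrx_use_workqueue.
           */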
   10502 static inline void
   10503 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10504 {
   10505 
   10506 	if (wmq->wmq_txrx_use_workqueue) {
   10507 		if (!wmq->wmq_wq_enqueued) {
   10508 			wmq->wmq_wq_enqueued = true;
   10509 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
   10510 			    curcpu());
   10511 		}
   10512 	} else
   10513 		softint_schedule(wmq->wmq_si);
   10514 }
   10515 
   10516 static inline void
   10517 wm_legacy_intr_disable(struct wm_softc *sc)
   10518 {
   10519 
   10520 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10521 }
   10522 
   10523 static inline void
   10524 wm_legacy_intr_enable(struct wm_softc *sc)
   10525 {
   10526 
   10527 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10528 }
   10529 
   10530 /*
   10531  * wm_intr_legacy:
   10532  *
   10533  *	Interrupt service routine for INTx and MSI.
   10534  */
   10535 static int
   10536 wm_intr_legacy(void *arg)
   10537 {
   10538 	struct wm_softc *sc = arg;
   10539 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10540 	struct wm_queue *wmq = &sc->sc_queue[0];
   10541 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10542 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10543 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10544 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10545 	uint32_t icr, rndval = 0;
   10546 	bool more = false;
   10547 
   10548 	icr = CSR_READ(sc, WMREG_ICR);
   10549 	if ((icr & sc->sc_icr) == 0)
   10550 		return 0;
   10551 
   10552 	DPRINTF(sc, WM_DEBUG_TX,
   10553 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10554 	rndval = icr;
   10556 
   10557 	mutex_enter(txq->txq_lock);
   10558 
   10559 	if (txq->txq_stopping) {
   10560 		mutex_exit(txq->txq_lock);
   10561 		return 1;
   10562 	}
   10563 
   10564 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10565 	if (icr & ICR_TXDW) {
   10566 		DPRINTF(sc, WM_DEBUG_TX,
   10567 		    ("%s: TX: got TXDW interrupt\n",
   10568 			device_xname(sc->sc_dev)));
   10569 		WM_Q_EVCNT_INCR(txq, txdw);
   10570 	}
   10571 #endif
   10572 	if (txlimit > 0) {
   10573 		more |= wm_txeof(txq, txlimit);
   10574 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10575 			more = true;
   10576 	} else
   10577 		more = true;
   10578 	mutex_exit(txq->txq_lock);
   10579 
   10580 	mutex_enter(rxq->rxq_lock);
   10581 
   10582 	if (rxq->rxq_stopping) {
   10583 		mutex_exit(rxq->rxq_lock);
   10584 		return 1;
   10585 	}
   10586 
   10587 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10588 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10589 		DPRINTF(sc, WM_DEBUG_RX,
   10590 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10591 			device_xname(sc->sc_dev),
   10592 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10593 		WM_Q_EVCNT_INCR(rxq, intr);
   10594 	}
   10595 #endif
   10596 	if (rxlimit > 0) {
   10597 		/*
   10598 		 * wm_rxeof() does *not* call upper layer functions directly,
   10599 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10600 		 * So, we can call wm_rxeof() in interrupt context.
   10601 		 */
   10602 		more = wm_rxeof(rxq, rxlimit);
   10603 	} else
   10604 		more = true;
   10605 
   10606 	mutex_exit(rxq->rxq_lock);
   10607 
   10608 	mutex_enter(sc->sc_core_lock);
   10609 
   10610 	if (sc->sc_core_stopping) {
   10611 		mutex_exit(sc->sc_core_lock);
   10612 		return 1;
   10613 	}
   10614 
   10615 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10616 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10617 		wm_linkintr(sc, icr);
   10618 	}
   10619 	if ((icr & ICR_GPI(0)) != 0)
   10620 		device_printf(sc->sc_dev, "got module interrupt\n");
   10621 
   10622 	mutex_exit(sc->sc_core_lock);
   10623 
   10624 	if (icr & ICR_RXO) {
   10625 #if defined(WM_DEBUG)
   10626 		log(LOG_WARNING, "%s: Receive overrun\n",
   10627 		    device_xname(sc->sc_dev));
   10628 #endif /* defined(WM_DEBUG) */
   10629 	}
   10630 
   10631 	rnd_add_uint32(&sc->rnd_source, rndval);
   10632 
   10633 	if (more) {
   10634 		/* Try to get more packets going. */
   10635 		wm_legacy_intr_disable(sc);
   10636 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10637 		wm_sched_handle_queue(sc, wmq);
   10638 	}
   10639 
   10640 	return 1;
   10641 }
   10642 
   10643 static inline void
   10644 wm_txrxintr_disable(struct wm_queue *wmq)
   10645 {
   10646 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10647 
   10648 	if (__predict_false(!wm_is_using_msix(sc))) {
   10649 		wm_legacy_intr_disable(sc);
   10650 		return;
   10651 	}
   10652 
   10653 	if (sc->sc_type == WM_T_82574)
   10654 		CSR_WRITE(sc, WMREG_IMC,
   10655 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10656 	else if (sc->sc_type == WM_T_82575)
   10657 		CSR_WRITE(sc, WMREG_EIMC,
   10658 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10659 	else
   10660 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10661 }
   10662 
   10663 static inline void
   10664 wm_txrxintr_enable(struct wm_queue *wmq)
   10665 {
   10666 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10667 
   10668 	wm_itrs_calculate(sc, wmq);
   10669 
   10670 	if (__predict_false(!wm_is_using_msix(sc))) {
   10671 		wm_legacy_intr_enable(sc);
   10672 		return;
   10673 	}
   10674 
   10675 	/*
   10676 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is re-enabled
   10677 	 * here.  It does not matter whether RXQ(0) or RXQ(1) re-enables it
   10678 	 * first, because each RXQ/TXQ interrupt is disabled while its
   10679 	 * wm_handle_queue(wmq) is running.
   10680 	 */
   10681 	if (sc->sc_type == WM_T_82574)
   10682 		CSR_WRITE(sc, WMREG_IMS,
   10683 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10684 	else if (sc->sc_type == WM_T_82575)
   10685 		CSR_WRITE(sc, WMREG_EIMS,
   10686 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10687 	else
   10688 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10689 }
   10690 
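          /*
           * wm_txrxintr_msix:
           *
           *	Interrupt service routine for a Tx/Rx queue pair in MSI-X
           *	mode.  The queue's interrupts stay masked until processing
           *	finishes or is handed off to wm_handle_queue().
           */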
   10691 static int
   10692 wm_txrxintr_msix(void *arg)
   10693 {
   10694 	struct wm_queue *wmq = arg;
   10695 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10696 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10697 	struct wm_softc *sc = txq->txq_sc;
   10698 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10699 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10700 	bool txmore;
   10701 	bool rxmore;
   10702 
   10703 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10704 
   10705 	DPRINTF(sc, WM_DEBUG_TX,
   10706 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10707 
   10708 	wm_txrxintr_disable(wmq);
   10709 
   10710 	mutex_enter(txq->txq_lock);
   10711 
   10712 	if (txq->txq_stopping) {
   10713 		mutex_exit(txq->txq_lock);
   10714 		return 1;
   10715 	}
   10716 
   10717 	WM_Q_EVCNT_INCR(txq, txdw);
   10718 	if (txlimit > 0) {
   10719 		txmore = wm_txeof(txq, txlimit);
   10720 		/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   10721 	} else
   10722 		txmore = true;
   10723 	mutex_exit(txq->txq_lock);
   10724 
   10725 	DPRINTF(sc, WM_DEBUG_RX,
   10726 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10727 	mutex_enter(rxq->rxq_lock);
   10728 
   10729 	if (rxq->rxq_stopping) {
   10730 		mutex_exit(rxq->rxq_lock);
   10731 		return 1;
   10732 	}
   10733 
   10734 	WM_Q_EVCNT_INCR(rxq, intr);
   10735 	if (rxlimit > 0) {
   10736 		rxmore = wm_rxeof(rxq, rxlimit);
   10737 	} else
   10738 		rxmore = true;
   10739 	mutex_exit(rxq->rxq_lock);
   10740 
   10741 	wm_itrs_writereg(sc, wmq);
   10742 
   10743 	if (txmore || rxmore) {
   10744 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10745 		wm_sched_handle_queue(sc, wmq);
   10746 	} else
   10747 		wm_txrxintr_enable(wmq);
   10748 
   10749 	return 1;
   10750 }
   10751 
   10752 static void
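          /*
           * wm_handle_queue:
           *
           *	Softint/workqueue handler that continues the Tx/Rx
           *	processing deferred from the interrupt handlers.
           */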
   10753 wm_handle_queue(void *arg)
   10754 {
   10755 	struct wm_queue *wmq = arg;
   10756 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10757 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10758 	struct wm_softc *sc = txq->txq_sc;
   10759 	u_int txlimit = sc->sc_tx_process_limit;
   10760 	u_int rxlimit = sc->sc_rx_process_limit;
   10761 	bool txmore;
   10762 	bool rxmore;
   10763 
   10764 	mutex_enter(txq->txq_lock);
   10765 	if (txq->txq_stopping) {
   10766 		mutex_exit(txq->txq_lock);
   10767 		return;
   10768 	}
   10769 	txmore = wm_txeof(txq, txlimit);
   10770 	wm_deferred_start_locked(txq);
   10771 	mutex_exit(txq->txq_lock);
   10772 
   10773 	mutex_enter(rxq->rxq_lock);
   10774 	if (rxq->rxq_stopping) {
   10775 		mutex_exit(rxq->rxq_lock);
   10776 		return;
   10777 	}
   10778 	WM_Q_EVCNT_INCR(rxq, defer);
   10779 	rxmore = wm_rxeof(rxq, rxlimit);
   10780 	mutex_exit(rxq->rxq_lock);
   10781 
   10782 	if (txmore || rxmore) {
   10783 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10784 		wm_sched_handle_queue(sc, wmq);
   10785 	} else
   10786 		wm_txrxintr_enable(wmq);
   10787 }
   10788 
   10789 static void
   10790 wm_handle_queue_work(struct work *wk, void *context)
   10791 {
   10792 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10793 
   10794 	/*
   10795 	 * Workaround for some qemu environments, which do not stop the
   10796 	 * interrupt immediately.
   10797 	 */
   10798 	wmq->wmq_wq_enqueued = false;
   10799 	wm_handle_queue(wmq);
   10800 }
   10801 
   10802 /*
   10803  * wm_linkintr_msix:
   10804  *
   10805  *	Interrupt service routine for link status change for MSI-X.
   10806  */
   10807 static int
   10808 wm_linkintr_msix(void *arg)
   10809 {
   10810 	struct wm_softc *sc = arg;
   10811 	uint32_t reg;
    10812 	bool has_rxo = false;
   10813 
   10814 	reg = CSR_READ(sc, WMREG_ICR);
   10815 	mutex_enter(sc->sc_core_lock);
   10816 	DPRINTF(sc, WM_DEBUG_LINK,
   10817 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10818 		device_xname(sc->sc_dev), reg));
   10819 
   10820 	if (sc->sc_core_stopping)
   10821 		goto out;
   10822 
   10823 	if ((reg & ICR_LSC) != 0) {
   10824 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10825 		wm_linkintr(sc, ICR_LSC);
   10826 	}
   10827 	if ((reg & ICR_GPI(0)) != 0)
   10828 		device_printf(sc->sc_dev, "got module interrupt\n");
   10829 
   10830 	/*
   10831 	 * XXX 82574 MSI-X mode workaround
   10832 	 *
    10833 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    10834 	 * MSI-X vector but causes neither the ICR_RXQ(0) nor the ICR_RXQ(1)
    10835 	 * vector. So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts
    10836 	 * by writing to WMREG_ICS so that received packets are processed.
   10837 	 */
   10838 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10839 #if defined(WM_DEBUG)
   10840 		log(LOG_WARNING, "%s: Receive overrun\n",
   10841 		    device_xname(sc->sc_dev));
   10842 #endif /* defined(WM_DEBUG) */
   10843 
   10844 		has_rxo = true;
   10845 		/*
    10846 		 * The RXO interrupt fires at a very high rate when receive
    10847 		 * traffic is heavy. We use polling mode for ICR_OTHER, as for
    10848 		 * the Tx/Rx interrupts; ICR_OTHER will be re-enabled at the
    10849 		 * end of wm_txrxintr_msix(), which is kicked by both the
    10850 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10851 		 */
   10852 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10853 
   10854 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10855 	}
   10856 
   10859 out:
   10860 	mutex_exit(sc->sc_core_lock);
   10861 
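	/*
	 * Re-enable the "other"/link interrupt source.  On the 82574,
	 * ICR_OTHER is left masked while an RXO storm is being drained
	 * (wm_txrxintr_msix() will re-enable it); only ICR_LSC is
	 * unmasked in that case.
	 */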
   10862 	if (sc->sc_type == WM_T_82574) {
   10863 		if (!has_rxo)
   10864 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10865 		else
   10866 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10867 	} else if (sc->sc_type == WM_T_82575)
   10868 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10869 	else
   10870 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10871 
   10872 	return 1;
   10873 }
   10874 
   10875 /*
   10876  * Media related.
   10877  * GMII, SGMII, TBI (and SERDES)
   10878  */
   10879 
   10880 /* Common */
   10881 
   10882 /*
   10883  * wm_tbi_serdes_set_linkled:
   10884  *
   10885  *	Update the link LED on TBI and SERDES devices.
   10886  */
   10887 static void
   10888 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10889 {
   10890 
   10891 	if (sc->sc_tbi_linkup)
   10892 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10893 	else
   10894 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10895 
    10896 	/* The LED pin is active-low on 82540 and newer devices */
   10897 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10898 
   10899 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10900 }
   10901 
   10902 /* GMII related */
   10903 
   10904 /*
   10905  * wm_gmii_reset:
   10906  *
   10907  *	Reset the PHY.
   10908  */
   10909 static void
   10910 wm_gmii_reset(struct wm_softc *sc)
   10911 {
   10912 	uint32_t reg;
   10913 	int rv;
   10914 
   10915 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10916 		device_xname(sc->sc_dev), __func__));
   10917 
   10918 	rv = sc->phy.acquire(sc);
   10919 	if (rv != 0) {
   10920 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10921 		    __func__);
   10922 		return;
   10923 	}
   10924 
   10925 	switch (sc->sc_type) {
   10926 	case WM_T_82542_2_0:
   10927 	case WM_T_82542_2_1:
   10928 		/* null */
   10929 		break;
   10930 	case WM_T_82543:
   10931 		/*
    10932 		 * On the 82543, we need to force the MAC's speed and duplex
    10933 		 * settings to match the PHY's configuration. In addition, we
    10934 		 * need to toggle the PHY's hardware reset pin to take the PHY
    10935 		 * out of reset.
   10936 		 */
   10937 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10938 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10939 
   10940 		/* The PHY reset pin is active-low. */
   10941 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10942 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10943 		    CTRL_EXT_SWDPIN(4));
   10944 		reg |= CTRL_EXT_SWDPIO(4);
   10945 
   10946 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10947 		CSR_WRITE_FLUSH(sc);
   10948 		delay(10*1000);
   10949 
   10950 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10951 		CSR_WRITE_FLUSH(sc);
   10952 		delay(150);
   10953 #if 0
   10954 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10955 #endif
   10956 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10957 		break;
   10958 	case WM_T_82544:	/* Reset 10000us */
   10959 	case WM_T_82540:
   10960 	case WM_T_82545:
   10961 	case WM_T_82545_3:
   10962 	case WM_T_82546:
   10963 	case WM_T_82546_3:
   10964 	case WM_T_82541:
   10965 	case WM_T_82541_2:
   10966 	case WM_T_82547:
   10967 	case WM_T_82547_2:
   10968 	case WM_T_82571:	/* Reset 100us */
   10969 	case WM_T_82572:
   10970 	case WM_T_82573:
   10971 	case WM_T_82574:
   10972 	case WM_T_82575:
   10973 	case WM_T_82576:
   10974 	case WM_T_82580:
   10975 	case WM_T_I350:
   10976 	case WM_T_I354:
   10977 	case WM_T_I210:
   10978 	case WM_T_I211:
   10979 	case WM_T_82583:
   10980 	case WM_T_80003:
   10981 		/* Generic reset */
   10982 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10983 		CSR_WRITE_FLUSH(sc);
   10984 		delay(20000);
   10985 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10986 		CSR_WRITE_FLUSH(sc);
   10987 		delay(20000);
   10988 
   10989 		if ((sc->sc_type == WM_T_82541)
   10990 		    || (sc->sc_type == WM_T_82541_2)
   10991 		    || (sc->sc_type == WM_T_82547)
   10992 		    || (sc->sc_type == WM_T_82547_2)) {
    10993 			/* Workarounds for IGP are done in igp_reset() */
   10994 			/* XXX add code to set LED after phy reset */
   10995 		}
   10996 		break;
   10997 	case WM_T_ICH8:
   10998 	case WM_T_ICH9:
   10999 	case WM_T_ICH10:
   11000 	case WM_T_PCH:
   11001 	case WM_T_PCH2:
   11002 	case WM_T_PCH_LPT:
   11003 	case WM_T_PCH_SPT:
   11004 	case WM_T_PCH_CNP:
   11005 		/* Generic reset */
   11006 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11007 		CSR_WRITE_FLUSH(sc);
   11008 		delay(100);
   11009 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11010 		CSR_WRITE_FLUSH(sc);
   11011 		delay(150);
   11012 		break;
   11013 	default:
   11014 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   11015 		    __func__);
   11016 		break;
   11017 	}
   11018 
   11019 	sc->phy.release(sc);
   11020 
   11021 	/* get_cfg_done */
   11022 	wm_get_cfg_done(sc);
   11023 
   11024 	/* Extra setup */
   11025 	switch (sc->sc_type) {
   11026 	case WM_T_82542_2_0:
   11027 	case WM_T_82542_2_1:
   11028 	case WM_T_82543:
   11029 	case WM_T_82544:
   11030 	case WM_T_82540:
   11031 	case WM_T_82545:
   11032 	case WM_T_82545_3:
   11033 	case WM_T_82546:
   11034 	case WM_T_82546_3:
   11035 	case WM_T_82541_2:
   11036 	case WM_T_82547_2:
   11037 	case WM_T_82571:
   11038 	case WM_T_82572:
   11039 	case WM_T_82573:
   11040 	case WM_T_82574:
   11041 	case WM_T_82583:
   11042 	case WM_T_82575:
   11043 	case WM_T_82576:
   11044 	case WM_T_82580:
   11045 	case WM_T_I350:
   11046 	case WM_T_I354:
   11047 	case WM_T_I210:
   11048 	case WM_T_I211:
   11049 	case WM_T_80003:
   11050 		/* Null */
   11051 		break;
   11052 	case WM_T_82541:
   11053 	case WM_T_82547:
    11054 		/* XXX Configure the LED after PHY reset */
   11055 		break;
   11056 	case WM_T_ICH8:
   11057 	case WM_T_ICH9:
   11058 	case WM_T_ICH10:
   11059 	case WM_T_PCH:
   11060 	case WM_T_PCH2:
   11061 	case WM_T_PCH_LPT:
   11062 	case WM_T_PCH_SPT:
   11063 	case WM_T_PCH_CNP:
   11064 		wm_phy_post_reset(sc);
   11065 		break;
   11066 	default:
   11067 		panic("%s: unknown type\n", __func__);
   11068 		break;
   11069 	}
   11070 }
   11071 
   11072 /*
   11073  * Set up sc_phytype and mii_{read|write}reg.
   11074  *
    11075  *  To identify the PHY type, the correct read/write functions must be
    11076  * used; to select the correct read/write functions, the PCI ID or MAC
    11077  * type is required, without accessing any PHY register.
    11078  *
    11079  *  On the first call of this function, the PHY ID is not known yet, so
    11080  * check the PCI ID or MAC type. The list of PCI IDs may not be complete,
    11081  * so the result might be incorrect.
    11082  *
    11083  *  On the second call, the PHY OUI and model are used to identify the
    11084  * PHY type. It might still be imperfect because of missing table
    11085  * entries, but it should be better than the first guess.
    11086  *
    11087  *  If the newly detected result differs from the previous assumption,
    11088  * a diagnostic message is printed.
   11089  */
   11090 static void
   11091 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   11092     uint16_t phy_model)
   11093 {
   11094 	device_t dev = sc->sc_dev;
   11095 	struct mii_data *mii = &sc->sc_mii;
   11096 	uint16_t new_phytype = WMPHY_UNKNOWN;
   11097 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   11098 	mii_readreg_t new_readreg;
   11099 	mii_writereg_t new_writereg;
   11100 	bool dodiag = true;
   11101 
   11102 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11103 		device_xname(sc->sc_dev), __func__));
   11104 
   11105 	/*
    11106 	 * 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
    11107 	 * always incorrect; don't print diagnostic output on the 2nd call.
   11108 	 */
   11109 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   11110 		dodiag = false;
   11111 
   11112 	if (mii->mii_readreg == NULL) {
   11113 		/*
   11114 		 *  This is the first call of this function. For ICH and PCH
   11115 		 * variants, it's difficult to determine the PHY access method
   11116 		 * by sc_type, so use the PCI product ID for some devices.
   11117 		 */
   11118 
   11119 		switch (sc->sc_pcidevid) {
   11120 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   11121 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   11122 			/* 82577 */
   11123 			new_phytype = WMPHY_82577;
   11124 			break;
   11125 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   11126 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   11127 			/* 82578 */
   11128 			new_phytype = WMPHY_82578;
   11129 			break;
   11130 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   11131 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   11132 			/* 82579 */
   11133 			new_phytype = WMPHY_82579;
   11134 			break;
   11135 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   11136 		case PCI_PRODUCT_INTEL_82801I_BM:
   11137 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   11138 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   11139 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   11140 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   11141 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   11142 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   11143 			/* ICH8, 9, 10 with 82567 */
   11144 			new_phytype = WMPHY_BM;
   11145 			break;
   11146 		default:
   11147 			break;
   11148 		}
   11149 	} else {
   11150 		/* It's not the first call. Use PHY OUI and model */
   11151 		switch (phy_oui) {
   11152 		case MII_OUI_ATTANSIC: /* atphy(4) */
   11153 			switch (phy_model) {
   11154 			case MII_MODEL_ATTANSIC_AR8021:
   11155 				new_phytype = WMPHY_82578;
   11156 				break;
   11157 			default:
   11158 				break;
   11159 			}
   11160 			break;
   11161 		case MII_OUI_xxMARVELL:
   11162 			switch (phy_model) {
   11163 			case MII_MODEL_xxMARVELL_I210:
   11164 				new_phytype = WMPHY_I210;
   11165 				break;
   11166 			case MII_MODEL_xxMARVELL_E1011:
   11167 			case MII_MODEL_xxMARVELL_E1000_3:
   11168 			case MII_MODEL_xxMARVELL_E1000_5:
   11169 			case MII_MODEL_xxMARVELL_E1112:
   11170 				new_phytype = WMPHY_M88;
   11171 				break;
   11172 			case MII_MODEL_xxMARVELL_E1149:
   11173 				new_phytype = WMPHY_BM;
   11174 				break;
   11175 			case MII_MODEL_xxMARVELL_E1111:
   11176 			case MII_MODEL_xxMARVELL_I347:
   11177 			case MII_MODEL_xxMARVELL_E1512:
   11178 			case MII_MODEL_xxMARVELL_E1340M:
   11179 			case MII_MODEL_xxMARVELL_E1543:
   11180 				new_phytype = WMPHY_M88;
   11181 				break;
   11182 			case MII_MODEL_xxMARVELL_I82563:
   11183 				new_phytype = WMPHY_GG82563;
   11184 				break;
   11185 			default:
   11186 				break;
   11187 			}
   11188 			break;
   11189 		case MII_OUI_INTEL:
   11190 			switch (phy_model) {
   11191 			case MII_MODEL_INTEL_I82577:
   11192 				new_phytype = WMPHY_82577;
   11193 				break;
   11194 			case MII_MODEL_INTEL_I82579:
   11195 				new_phytype = WMPHY_82579;
   11196 				break;
   11197 			case MII_MODEL_INTEL_I217:
   11198 				new_phytype = WMPHY_I217;
   11199 				break;
   11200 			case MII_MODEL_INTEL_I82580:
   11201 				new_phytype = WMPHY_82580;
   11202 				break;
   11203 			case MII_MODEL_INTEL_I350:
   11204 				new_phytype = WMPHY_I350;
   11205 				break;
   11206 			default:
   11207 				break;
   11208 			}
   11209 			break;
   11210 		case MII_OUI_yyINTEL:
   11211 			switch (phy_model) {
   11212 			case MII_MODEL_yyINTEL_I82562G:
   11213 			case MII_MODEL_yyINTEL_I82562EM:
   11214 			case MII_MODEL_yyINTEL_I82562ET:
   11215 				new_phytype = WMPHY_IFE;
   11216 				break;
   11217 			case MII_MODEL_yyINTEL_IGP01E1000:
   11218 				new_phytype = WMPHY_IGP;
   11219 				break;
   11220 			case MII_MODEL_yyINTEL_I82566:
   11221 				new_phytype = WMPHY_IGP_3;
   11222 				break;
   11223 			default:
   11224 				break;
   11225 			}
   11226 			break;
   11227 		default:
   11228 			break;
   11229 		}
   11230 
   11231 		if (dodiag) {
   11232 			if (new_phytype == WMPHY_UNKNOWN)
   11233 				aprint_verbose_dev(dev,
   11234 				    "%s: Unknown PHY model. OUI=%06x, "
   11235 				    "model=%04x\n", __func__, phy_oui,
   11236 				    phy_model);
   11237 
   11238 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11239 			    && (sc->sc_phytype != new_phytype)) {
   11240 				aprint_error_dev(dev, "Previously assumed PHY "
    11241 				    "type (%u) was incorrect. PHY type from "
    11242 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   11243 			}
   11244 		}
   11245 	}
   11246 
   11247 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11248 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11249 		/* SGMII */
   11250 		new_readreg = wm_sgmii_readreg;
   11251 		new_writereg = wm_sgmii_writereg;
   11252 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11253 		/* BM2 (phyaddr == 1) */
   11254 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11255 		    && (new_phytype != WMPHY_BM)
   11256 		    && (new_phytype != WMPHY_UNKNOWN))
   11257 			doubt_phytype = new_phytype;
   11258 		new_phytype = WMPHY_BM;
   11259 		new_readreg = wm_gmii_bm_readreg;
   11260 		new_writereg = wm_gmii_bm_writereg;
   11261 	} else if (sc->sc_type >= WM_T_PCH) {
   11262 		/* All PCH* use _hv_ */
   11263 		new_readreg = wm_gmii_hv_readreg;
   11264 		new_writereg = wm_gmii_hv_writereg;
   11265 	} else if (sc->sc_type >= WM_T_ICH8) {
   11266 		/* non-82567 ICH8, 9 and 10 */
   11267 		new_readreg = wm_gmii_i82544_readreg;
   11268 		new_writereg = wm_gmii_i82544_writereg;
   11269 	} else if (sc->sc_type >= WM_T_80003) {
   11270 		/* 80003 */
   11271 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11272 		    && (new_phytype != WMPHY_GG82563)
   11273 		    && (new_phytype != WMPHY_UNKNOWN))
   11274 			doubt_phytype = new_phytype;
   11275 		new_phytype = WMPHY_GG82563;
   11276 		new_readreg = wm_gmii_i80003_readreg;
   11277 		new_writereg = wm_gmii_i80003_writereg;
   11278 	} else if (sc->sc_type >= WM_T_I210) {
   11279 		/* I210 and I211 */
   11280 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11281 		    && (new_phytype != WMPHY_I210)
   11282 		    && (new_phytype != WMPHY_UNKNOWN))
   11283 			doubt_phytype = new_phytype;
   11284 		new_phytype = WMPHY_I210;
   11285 		new_readreg = wm_gmii_gs40g_readreg;
   11286 		new_writereg = wm_gmii_gs40g_writereg;
   11287 	} else if (sc->sc_type >= WM_T_82580) {
   11288 		/* 82580, I350 and I354 */
   11289 		new_readreg = wm_gmii_82580_readreg;
   11290 		new_writereg = wm_gmii_82580_writereg;
   11291 	} else if (sc->sc_type >= WM_T_82544) {
    11292 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11293 		new_readreg = wm_gmii_i82544_readreg;
   11294 		new_writereg = wm_gmii_i82544_writereg;
   11295 	} else {
   11296 		new_readreg = wm_gmii_i82543_readreg;
   11297 		new_writereg = wm_gmii_i82543_writereg;
   11298 	}
   11299 
   11300 	if (new_phytype == WMPHY_BM) {
   11301 		/* All BM use _bm_ */
   11302 		new_readreg = wm_gmii_bm_readreg;
   11303 		new_writereg = wm_gmii_bm_writereg;
   11304 	}
   11305 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11306 		/* All PCH* use _hv_ */
   11307 		new_readreg = wm_gmii_hv_readreg;
   11308 		new_writereg = wm_gmii_hv_writereg;
   11309 	}
   11310 
   11311 	/* Diag output */
   11312 	if (dodiag) {
   11313 		if (doubt_phytype != WMPHY_UNKNOWN)
   11314 			aprint_error_dev(dev, "Assumed new PHY type was "
   11315 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11316 			    new_phytype);
   11317 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11318 		    && (sc->sc_phytype != new_phytype))
    11319 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    11320 			    "was incorrect. New PHY type = %u\n",
   11321 			    sc->sc_phytype, new_phytype);
   11322 
   11323 		if ((mii->mii_readreg != NULL) &&
   11324 		    (new_phytype == WMPHY_UNKNOWN))
   11325 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11326 
   11327 		if ((mii->mii_readreg != NULL) &&
   11328 		    (mii->mii_readreg != new_readreg))
   11329 			aprint_error_dev(dev, "Previously assumed PHY "
   11330 			    "read/write function was incorrect.\n");
   11331 	}
   11332 
   11333 	/* Update now */
   11334 	sc->sc_phytype = new_phytype;
   11335 	mii->mii_readreg = new_readreg;
   11336 	mii->mii_writereg = new_writereg;
   11337 	if (new_readreg == wm_gmii_hv_readreg) {
   11338 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11339 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11340 	} else if (new_readreg == wm_sgmii_readreg) {
   11341 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11342 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11343 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11344 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11345 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11346 	}
   11347 }
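
/*
 * Illustrative sketch (not compiled in): how the two-phase detection above
 * is driven.  The first call happens before any PHY register can be read,
 * so it passes OUI 0 and model 0; the second call happens after mii_attach()
 * has probed the PHY ID (see wm_gmii_mediainit() below).
 */
#if 0
	struct mii_softc *child;

	wm_gmii_setup_phytype(sc, 0, 0);		/* 1st call: guess */
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);		/* probes the PHY ID */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child != NULL)				/* 2nd call: verify */
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
#endif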
   11348 
   11349 /*
   11350  * wm_get_phy_id_82575:
   11351  *
    11352  * Return the PHY ID, or -1 on failure.
   11353  */
   11354 static int
   11355 wm_get_phy_id_82575(struct wm_softc *sc)
   11356 {
   11357 	uint32_t reg;
   11358 	int phyid = -1;
   11359 
   11360 	/* XXX */
   11361 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11362 		return -1;
   11363 
   11364 	if (wm_sgmii_uses_mdio(sc)) {
   11365 		switch (sc->sc_type) {
   11366 		case WM_T_82575:
   11367 		case WM_T_82576:
   11368 			reg = CSR_READ(sc, WMREG_MDIC);
   11369 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11370 			break;
   11371 		case WM_T_82580:
   11372 		case WM_T_I350:
   11373 		case WM_T_I354:
   11374 		case WM_T_I210:
   11375 		case WM_T_I211:
   11376 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11377 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11378 			break;
   11379 		default:
   11380 			return -1;
   11381 		}
   11382 	}
   11383 
   11384 	return phyid;
   11385 }
   11386 
   11387 /*
   11388  * wm_gmii_mediainit:
   11389  *
   11390  *	Initialize media for use on 1000BASE-T devices.
   11391  */
   11392 static void
   11393 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11394 {
   11395 	device_t dev = sc->sc_dev;
   11396 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11397 	struct mii_data *mii = &sc->sc_mii;
   11398 
   11399 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11400 		device_xname(sc->sc_dev), __func__));
   11401 
   11402 	/* We have GMII. */
   11403 	sc->sc_flags |= WM_F_HAS_MII;
   11404 
   11405 	if (sc->sc_type == WM_T_80003)
   11406 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11407 	else
   11408 		sc->sc_tipg = TIPG_1000T_DFLT;
   11409 
   11410 	/*
   11411 	 * Let the chip set speed/duplex on its own based on
   11412 	 * signals from the PHY.
   11413 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11414 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11415 	 */
   11416 	sc->sc_ctrl |= CTRL_SLU;
   11417 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11418 
   11419 	/* Initialize our media structures and probe the GMII. */
   11420 	mii->mii_ifp = ifp;
   11421 
   11422 	mii->mii_statchg = wm_gmii_statchg;
   11423 
    11424 	/* Switch PHY control from SMBus to PCIe */
   11425 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11426 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11427 	    || (sc->sc_type == WM_T_PCH_CNP))
   11428 		wm_init_phy_workarounds_pchlan(sc);
   11429 
   11430 	wm_gmii_reset(sc);
   11431 
   11432 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11433 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11434 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11435 
   11436 	/* Setup internal SGMII PHY for SFP */
   11437 	wm_sgmii_sfp_preconfig(sc);
   11438 
   11439 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11440 	    || (sc->sc_type == WM_T_82580)
   11441 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11442 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11443 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11444 			/* Attach only one port */
   11445 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11446 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11447 		} else {
   11448 			int i, id;
   11449 			uint32_t ctrl_ext;
   11450 
   11451 			id = wm_get_phy_id_82575(sc);
   11452 			if (id != -1) {
   11453 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11454 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11455 			}
   11456 			if ((id == -1)
   11457 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11458 				/* Power on sgmii phy if it is disabled */
   11459 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11460 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11461 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11462 				CSR_WRITE_FLUSH(sc);
   11463 				delay(300*1000); /* XXX too long */
   11464 
   11465 				/*
    11466 				 * Scan PHY addresses 1 to 7.
    11467 				 *
    11468 				 * I2C access can fail with the I2C register's
    11469 				 * ERROR bit set, so suppress error messages
    11470 				 * while scanning.
   11471 				 */
   11472 				sc->phy.no_errprint = true;
   11473 				for (i = 1; i < 8; i++)
   11474 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11475 					    0xffffffff, i, MII_OFFSET_ANY,
   11476 					    MIIF_DOPAUSE);
   11477 				sc->phy.no_errprint = false;
   11478 
   11479 				/* Restore previous sfp cage power state */
   11480 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11481 			}
   11482 		}
   11483 	} else
   11484 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11485 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11486 
   11487 	/*
    11488 	 * If the MAC is PCH2 or newer and it failed to detect a MII PHY, call
   11489 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11490 	 */
   11491 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11492 		|| (sc->sc_type == WM_T_PCH_SPT)
   11493 		|| (sc->sc_type == WM_T_PCH_CNP))
   11494 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11495 		wm_set_mdio_slow_mode_hv(sc);
   11496 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11497 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11498 	}
   11499 
   11500 	/*
   11501 	 * (For ICH8 variants)
   11502 	 * If PHY detection failed, use BM's r/w function and retry.
   11503 	 */
   11504 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11505 		/* if failed, retry with *_bm_* */
   11506 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11507 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11508 		    sc->sc_phytype);
   11509 		sc->sc_phytype = WMPHY_BM;
   11510 		mii->mii_readreg = wm_gmii_bm_readreg;
   11511 		mii->mii_writereg = wm_gmii_bm_writereg;
   11512 
   11513 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11514 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11515 	}
   11516 
   11517 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11518 		/* No PHY was found */
   11519 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11520 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11521 		sc->sc_phytype = WMPHY_NONE;
   11522 	} else {
   11523 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11524 
   11525 		/*
    11526 		 * PHY found! Check the PHY type again with this second call
    11527 		 * of wm_gmii_setup_phytype.
   11528 		 */
   11529 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11530 		    child->mii_mpd_model);
   11531 
   11532 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11533 	}
   11534 }
   11535 
   11536 /*
   11537  * wm_gmii_mediachange:	[ifmedia interface function]
   11538  *
   11539  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11540  */
   11541 static int
   11542 wm_gmii_mediachange(struct ifnet *ifp)
   11543 {
   11544 	struct wm_softc *sc = ifp->if_softc;
   11545 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11546 	uint32_t reg;
   11547 	int rc;
   11548 
   11549 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11550 		device_xname(sc->sc_dev), __func__));
   11551 
   11552 	KASSERT(mutex_owned(sc->sc_core_lock));
   11553 
   11554 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11555 		return 0;
   11556 
   11557 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11558 	if ((sc->sc_type == WM_T_82580)
   11559 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11560 	    || (sc->sc_type == WM_T_I211)) {
   11561 		reg = CSR_READ(sc, WMREG_PHPM);
   11562 		reg &= ~PHPM_GO_LINK_D;
   11563 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11564 	}
   11565 
   11566 	/* Disable D0 LPLU. */
   11567 	wm_lplu_d0_disable(sc);
   11568 
   11569 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11570 	sc->sc_ctrl |= CTRL_SLU;
   11571 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11572 	    || (sc->sc_type > WM_T_82543)) {
   11573 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11574 	} else {
   11575 		sc->sc_ctrl &= ~CTRL_ASDE;
   11576 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11577 		if (ife->ifm_media & IFM_FDX)
   11578 			sc->sc_ctrl |= CTRL_FD;
   11579 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11580 		case IFM_10_T:
   11581 			sc->sc_ctrl |= CTRL_SPEED_10;
   11582 			break;
   11583 		case IFM_100_TX:
   11584 			sc->sc_ctrl |= CTRL_SPEED_100;
   11585 			break;
   11586 		case IFM_1000_T:
   11587 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11588 			break;
   11589 		case IFM_NONE:
   11590 			/* There is no specific setting for IFM_NONE */
   11591 			break;
   11592 		default:
   11593 			panic("wm_gmii_mediachange: bad media 0x%x",
   11594 			    ife->ifm_media);
   11595 		}
   11596 	}
   11597 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11598 	CSR_WRITE_FLUSH(sc);
   11599 
   11600 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11601 		wm_serdes_mediachange(ifp);
   11602 
   11603 	if (sc->sc_type <= WM_T_82543)
   11604 		wm_gmii_reset(sc);
   11605 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11606 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11607 		/* Allow time for the SFP cage to power up the PHY */
   11608 		delay(300 * 1000);
   11609 		wm_gmii_reset(sc);
   11610 	}
   11611 
   11612 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11613 		return 0;
   11614 	return rc;
   11615 }
   11616 
   11617 /*
   11618  * wm_gmii_mediastatus:	[ifmedia interface function]
   11619  *
   11620  *	Get the current interface media status on a 1000BASE-T device.
   11621  */
   11622 static void
   11623 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11624 {
   11625 	struct wm_softc *sc = ifp->if_softc;
   11626 
   11627 	KASSERT(mutex_owned(sc->sc_core_lock));
   11628 
   11629 	ether_mediastatus(ifp, ifmr);
   11630 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11631 	    | sc->sc_flowflags;
   11632 }
   11633 
   11634 #define	MDI_IO		CTRL_SWDPIN(2)
   11635 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11636 #define	MDI_CLK		CTRL_SWDPIN(3)
   11637 
   11638 static void
   11639 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11640 {
   11641 	uint32_t i, v;
   11642 
   11643 	v = CSR_READ(sc, WMREG_CTRL);
   11644 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11645 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11646 
   11647 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11648 		if (data & i)
   11649 			v |= MDI_IO;
   11650 		else
   11651 			v &= ~MDI_IO;
   11652 		CSR_WRITE(sc, WMREG_CTRL, v);
   11653 		CSR_WRITE_FLUSH(sc);
   11654 		delay(10);
   11655 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11656 		CSR_WRITE_FLUSH(sc);
   11657 		delay(10);
   11658 		CSR_WRITE(sc, WMREG_CTRL, v);
   11659 		CSR_WRITE_FLUSH(sc);
   11660 		delay(10);
   11661 	}
   11662 }
   11663 
   11664 static uint16_t
   11665 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11666 {
   11667 	uint32_t v, i;
   11668 	uint16_t data = 0;
   11669 
   11670 	v = CSR_READ(sc, WMREG_CTRL);
   11671 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11672 	v |= CTRL_SWDPIO(3);
   11673 
   11674 	CSR_WRITE(sc, WMREG_CTRL, v);
   11675 	CSR_WRITE_FLUSH(sc);
   11676 	delay(10);
   11677 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11678 	CSR_WRITE_FLUSH(sc);
   11679 	delay(10);
   11680 	CSR_WRITE(sc, WMREG_CTRL, v);
   11681 	CSR_WRITE_FLUSH(sc);
   11682 	delay(10);
   11683 
   11684 	for (i = 0; i < 16; i++) {
   11685 		data <<= 1;
   11686 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11687 		CSR_WRITE_FLUSH(sc);
   11688 		delay(10);
   11689 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11690 			data |= 1;
   11691 		CSR_WRITE(sc, WMREG_CTRL, v);
   11692 		CSR_WRITE_FLUSH(sc);
   11693 		delay(10);
   11694 	}
   11695 
   11696 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11697 	CSR_WRITE_FLUSH(sc);
   11698 	delay(10);
   11699 	CSR_WRITE(sc, WMREG_CTRL, v);
   11700 	CSR_WRITE_FLUSH(sc);
   11701 	delay(10);
   11702 
   11703 	return data;
   11704 }
   11705 
   11706 #undef MDI_IO
   11707 #undef MDI_DIR
   11708 #undef MDI_CLK
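
/*
 * The two helpers above bit-bang IEEE 802.3 Clause 22 MDIO frames on the
 * software-definable pins.  Sketch of the frame layout implied by the bit
 * packing in wm_gmii_i82543_{read,write}reg() below (sent MSB first):
 *
 *   read:  <32x1 preamble> <ST=01> <OP=10> <PHYAD[4:0]> <REGAD[4:0]>
 *          <turnaround, PHY drives> <DATA[15:0]>
 *   write: <32x1 preamble> <ST=01> <OP=01> <PHYAD[4:0]> <REGAD[4:0]>
 *          <TA=10> <DATA[15:0]>
 */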
   11709 
   11710 /*
   11711  * wm_gmii_i82543_readreg:	[mii interface function]
   11712  *
   11713  *	Read a PHY register on the GMII (i82543 version).
   11714  */
   11715 static int
   11716 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11717 {
   11718 	struct wm_softc *sc = device_private(dev);
   11719 
   11720 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11721 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11722 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11723 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11724 
   11725 	DPRINTF(sc, WM_DEBUG_GMII,
   11726 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11727 		device_xname(dev), phy, reg, *val));
   11728 
   11729 	return 0;
   11730 }
   11731 
   11732 /*
   11733  * wm_gmii_i82543_writereg:	[mii interface function]
   11734  *
   11735  *	Write a PHY register on the GMII (i82543 version).
   11736  */
   11737 static int
   11738 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11739 {
   11740 	struct wm_softc *sc = device_private(dev);
   11741 
   11742 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11743 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11744 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11745 	    (MII_COMMAND_START << 30), 32);
   11746 
   11747 	return 0;
   11748 }
   11749 
   11750 /*
   11751  * wm_gmii_mdic_readreg:	[mii interface function]
   11752  *
   11753  *	Read a PHY register on the GMII.
   11754  */
   11755 static int
   11756 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11757 {
   11758 	struct wm_softc *sc = device_private(dev);
   11759 	uint32_t mdic = 0;
   11760 	int i;
   11761 
   11762 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11763 	    && (reg > MII_ADDRMASK)) {
   11764 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11765 		    __func__, sc->sc_phytype, reg);
   11766 		reg &= MII_ADDRMASK;
   11767 	}
   11768 
   11769 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11770 	    MDIC_REGADD(reg));
   11771 
   11772 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11773 		delay(50);
   11774 		mdic = CSR_READ(sc, WMREG_MDIC);
   11775 		if (mdic & MDIC_READY)
   11776 			break;
   11777 	}
   11778 
   11779 	if ((mdic & MDIC_READY) == 0) {
   11780 		DPRINTF(sc, WM_DEBUG_GMII,
   11781 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11782 			device_xname(dev), phy, reg));
   11783 		return ETIMEDOUT;
   11784 	} else if (mdic & MDIC_E) {
   11785 		/* This is normal if no PHY is present. */
   11786 		DPRINTF(sc, WM_DEBUG_GMII,
   11787 		    ("%s: MDIC read error: phy %d reg %d\n",
   11788 			device_xname(sc->sc_dev), phy, reg));
   11789 		return -1;
   11790 	} else
   11791 		*val = MDIC_DATA(mdic);
   11792 
   11793 	/*
   11794 	 * Allow some time after each MDIC transaction to avoid
   11795 	 * reading duplicate data in the next MDIC transaction.
   11796 	 */
   11797 	if (sc->sc_type == WM_T_PCH2)
   11798 		delay(100);
   11799 
   11800 	return 0;
   11801 }
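
/*
 * Sketch of the MDIC handshake used above and below (field names as in
 * if_wmreg.h): the host writes MDIC_OP_{READ,WRITE} | MDIC_PHYADD(phy) |
 * MDIC_REGADD(reg) [| MDIC_DATA(val)] and then polls, at 50us intervals,
 * until the hardware sets MDIC_READY; MDIC_E reports an error (e.g. no
 * PHY at that address).
 */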
   11802 
   11803 /*
   11804  * wm_gmii_mdic_writereg:	[mii interface function]
   11805  *
   11806  *	Write a PHY register on the GMII.
   11807  */
   11808 static int
   11809 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11810 {
   11811 	struct wm_softc *sc = device_private(dev);
   11812 	uint32_t mdic = 0;
   11813 	int i;
   11814 
   11815 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11816 	    && (reg > MII_ADDRMASK)) {
   11817 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11818 		    __func__, sc->sc_phytype, reg);
   11819 		reg &= MII_ADDRMASK;
   11820 	}
   11821 
   11822 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11823 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11824 
   11825 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11826 		delay(50);
   11827 		mdic = CSR_READ(sc, WMREG_MDIC);
   11828 		if (mdic & MDIC_READY)
   11829 			break;
   11830 	}
   11831 
   11832 	if ((mdic & MDIC_READY) == 0) {
   11833 		DPRINTF(sc, WM_DEBUG_GMII,
   11834 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11835 			device_xname(dev), phy, reg));
   11836 		return ETIMEDOUT;
   11837 	} else if (mdic & MDIC_E) {
   11838 		DPRINTF(sc, WM_DEBUG_GMII,
   11839 		    ("%s: MDIC write error: phy %d reg %d\n",
   11840 			device_xname(dev), phy, reg));
   11841 		return -1;
   11842 	}
   11843 
   11844 	/*
   11845 	 * Allow some time after each MDIC transaction to avoid
   11846 	 * reading duplicate data in the next MDIC transaction.
   11847 	 */
   11848 	if (sc->sc_type == WM_T_PCH2)
   11849 		delay(100);
   11850 
   11851 	return 0;
   11852 }
   11853 
   11854 /*
   11855  * wm_gmii_i82544_readreg:	[mii interface function]
   11856  *
   11857  *	Read a PHY register on the GMII.
   11858  */
   11859 static int
   11860 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11861 {
   11862 	struct wm_softc *sc = device_private(dev);
   11863 	int rv;
   11864 
   11865 	rv = sc->phy.acquire(sc);
   11866 	if (rv != 0) {
   11867 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11868 		return rv;
   11869 	}
   11870 
   11871 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11872 
   11873 	sc->phy.release(sc);
   11874 
   11875 	return rv;
   11876 }
   11877 
   11878 static int
   11879 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11880 {
   11881 	struct wm_softc *sc = device_private(dev);
   11882 	int rv;
   11883 
   11884 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11885 		switch (sc->sc_phytype) {
   11886 		case WMPHY_IGP:
   11887 		case WMPHY_IGP_2:
   11888 		case WMPHY_IGP_3:
   11889 			rv = wm_gmii_mdic_writereg(dev, phy,
   11890 			    IGPHY_PAGE_SELECT, reg);
   11891 			if (rv != 0)
   11892 				return rv;
   11893 			break;
   11894 		default:
   11895 #ifdef WM_DEBUG
   11896 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11897 			    __func__, sc->sc_phytype, reg);
   11898 #endif
   11899 			break;
   11900 		}
   11901 	}
   11902 
   11903 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11904 }
   11905 
   11906 /*
   11907  * wm_gmii_i82544_writereg:	[mii interface function]
   11908  *
   11909  *	Write a PHY register on the GMII.
   11910  */
   11911 static int
   11912 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11913 {
   11914 	struct wm_softc *sc = device_private(dev);
   11915 	int rv;
   11916 
   11917 	rv = sc->phy.acquire(sc);
   11918 	if (rv != 0) {
   11919 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11920 		return rv;
   11921 	}
   11922 
   11923 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11924 	sc->phy.release(sc);
   11925 
   11926 	return rv;
   11927 }
   11928 
   11929 static int
   11930 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11931 {
   11932 	struct wm_softc *sc = device_private(dev);
   11933 	int rv;
   11934 
   11935 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11936 		switch (sc->sc_phytype) {
   11937 		case WMPHY_IGP:
   11938 		case WMPHY_IGP_2:
   11939 		case WMPHY_IGP_3:
   11940 			rv = wm_gmii_mdic_writereg(dev, phy,
   11941 			    IGPHY_PAGE_SELECT, reg);
   11942 			if (rv != 0)
   11943 				return rv;
   11944 			break;
   11945 		default:
   11946 #ifdef WM_DEBUG
   11947 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11948 			    __func__, sc->sc_phytype, reg);
   11949 #endif
   11950 			break;
   11951 		}
   11952 	}
   11953 
   11954 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11955 }
   11956 
   11957 /*
   11958  * wm_gmii_i80003_readreg:	[mii interface function]
   11959  *
   11960  *	Read a PHY register on the kumeran
    11961  *	Read a PHY register on the Kumeran bus.
   11962  * resource ...
   11963  */
   11964 static int
   11965 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11966 {
   11967 	struct wm_softc *sc = device_private(dev);
   11968 	int page_select;
   11969 	uint16_t temp, temp2;
   11970 	int rv;
   11971 
   11972 	if (phy != 1) /* Only one PHY on kumeran bus */
   11973 		return -1;
   11974 
   11975 	rv = sc->phy.acquire(sc);
   11976 	if (rv != 0) {
   11977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11978 		return rv;
   11979 	}
   11980 
   11981 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11982 		page_select = GG82563_PHY_PAGE_SELECT;
   11983 	else {
   11984 		/*
   11985 		 * Use Alternative Page Select register to access registers
   11986 		 * 30 and 31.
   11987 		 */
   11988 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11989 	}
   11990 	temp = reg >> GG82563_PAGE_SHIFT;
   11991 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11992 		goto out;
   11993 
   11994 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11995 		/*
    11996 		 * Wait an extra 200us to work around a bug with the ready
    11997 		 * bit in the MDIC register.
   11998 		 */
   11999 		delay(200);
   12000 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12001 		if ((rv != 0) || (temp2 != temp)) {
   12002 			device_printf(dev, "%s failed\n", __func__);
   12003 			rv = -1;
   12004 			goto out;
   12005 		}
   12006 		delay(200);
   12007 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12008 		delay(200);
   12009 	} else
   12010 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12011 
   12012 out:
   12013 	sc->phy.release(sc);
   12014 	return rv;
   12015 }
   12016 
   12017 /*
   12018  * wm_gmii_i80003_writereg:	[mii interface function]
   12019  *
    12020  *	Write a PHY register on the Kumeran bus.
   12021  * This could be handled by the PHY layer if we didn't have to lock the
   12022  * resource ...
   12023  */
   12024 static int
   12025 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   12026 {
   12027 	struct wm_softc *sc = device_private(dev);
   12028 	int page_select, rv;
   12029 	uint16_t temp, temp2;
   12030 
   12031 	if (phy != 1) /* Only one PHY on kumeran bus */
   12032 		return -1;
   12033 
   12034 	rv = sc->phy.acquire(sc);
   12035 	if (rv != 0) {
   12036 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12037 		return rv;
   12038 	}
   12039 
   12040 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   12041 		page_select = GG82563_PHY_PAGE_SELECT;
   12042 	else {
   12043 		/*
   12044 		 * Use Alternative Page Select register to access registers
   12045 		 * 30 and 31.
   12046 		 */
   12047 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   12048 	}
   12049 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   12050 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   12051 		goto out;
   12052 
   12053 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   12054 		/*
   12055 		 * Wait more 200us for a bug of the ready bit in the MDIC
    12056 		 * Wait an extra 200us to work around a bug with the ready
    12057 		 * bit in the MDIC register.
   12058 		delay(200);
   12059 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   12060 		if ((rv != 0) || (temp2 != temp)) {
   12061 			device_printf(dev, "%s failed\n", __func__);
   12062 			rv = -1;
   12063 			goto out;
   12064 		}
   12065 		delay(200);
   12066 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12067 		delay(200);
   12068 	} else
   12069 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12070 
   12071 out:
   12072 	sc->phy.release(sc);
   12073 	return rv;
   12074 }
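
/*
 * A note on the 80003/GG82563 register encoding used above (an assumption
 * based on the code, not a hardware reference): register arguments are
 * encoded as (page << GG82563_PAGE_SHIFT) | (reg & MII_ADDRMASK), and the
 * registers at or above GG82563_MIN_ALT_REG in each page are reached via
 * the alternative page-select register.
 */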
   12075 
   12076 /*
   12077  * wm_gmii_bm_readreg:	[mii interface function]
   12078  *
    12079  *	Read a BM PHY register.
   12080  * This could be handled by the PHY layer if we didn't have to lock the
   12081  * resource ...
   12082  */
   12083 static int
   12084 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12085 {
   12086 	struct wm_softc *sc = device_private(dev);
   12087 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12088 	int rv;
   12089 
   12090 	rv = sc->phy.acquire(sc);
   12091 	if (rv != 0) {
   12092 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12093 		return rv;
   12094 	}
   12095 
   12096 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12097 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12098 		    || (reg == 31)) ? 1 : phy;
   12099 	/* Page 800 works differently than the rest so it has its own func */
   12100 	if (page == BM_WUC_PAGE) {
   12101 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12102 		goto release;
   12103 	}
   12104 
   12105 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12106 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12107 		    && (sc->sc_type != WM_T_82583))
   12108 			rv = wm_gmii_mdic_writereg(dev, phy,
   12109 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12110 		else
   12111 			rv = wm_gmii_mdic_writereg(dev, phy,
   12112 			    BME1000_PHY_PAGE_SELECT, page);
   12113 		if (rv != 0)
   12114 			goto release;
   12115 	}
   12116 
   12117 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   12118 
   12119 release:
   12120 	sc->phy.release(sc);
   12121 	return rv;
   12122 }
   12123 
   12124 /*
   12125  * wm_gmii_bm_writereg:	[mii interface function]
   12126  *
    12127  *	Write a BM PHY register.
   12128  * This could be handled by the PHY layer if we didn't have to lock the
   12129  * resource ...
   12130  */
   12131 static int
   12132 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   12133 {
   12134 	struct wm_softc *sc = device_private(dev);
   12135 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   12136 	int rv;
   12137 
   12138 	rv = sc->phy.acquire(sc);
   12139 	if (rv != 0) {
   12140 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12141 		return rv;
   12142 	}
   12143 
   12144 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   12145 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   12146 		    || (reg == 31)) ? 1 : phy;
   12147 	/* Page 800 works differently than the rest so it has its own func */
   12148 	if (page == BM_WUC_PAGE) {
   12149 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   12150 		goto release;
   12151 	}
   12152 
   12153 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   12154 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   12155 		    && (sc->sc_type != WM_T_82583))
   12156 			rv = wm_gmii_mdic_writereg(dev, phy,
   12157 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12158 		else
   12159 			rv = wm_gmii_mdic_writereg(dev, phy,
   12160 			    BME1000_PHY_PAGE_SELECT, page);
   12161 		if (rv != 0)
   12162 			goto release;
   12163 	}
   12164 
   12165 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   12166 
   12167 release:
   12168 	sc->phy.release(sc);
   12169 	return rv;
   12170 }
   12171 
   12172 /*
   12173  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   12174  *  @dev: pointer to the HW structure
    12175  *  @phy_regp: pointer to store original contents of BM_WUC_ENABLE_REG
   12176  *
   12177  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   12178  *  address to store contents of the BM_WUC_ENABLE_REG register.
   12179  */
   12180 static int
   12181 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12182 {
   12183 #ifdef WM_DEBUG
   12184 	struct wm_softc *sc = device_private(dev);
   12185 #endif
   12186 	uint16_t temp;
   12187 	int rv;
   12188 
   12189 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12190 		device_xname(dev), __func__));
   12191 
   12192 	if (!phy_regp)
   12193 		return -1;
   12194 
   12195 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   12196 
   12197 	/* Select Port Control Registers page */
   12198 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12199 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12200 	if (rv != 0)
   12201 		return rv;
   12202 
   12203 	/* Read WUCE and save it */
   12204 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   12205 	if (rv != 0)
   12206 		return rv;
   12207 
   12208 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   12209 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   12210 	 */
   12211 	temp = *phy_regp;
   12212 	temp |= BM_WUC_ENABLE_BIT;
   12213 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   12214 
   12215 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   12216 		return rv;
   12217 
    12218 	/* Select the Host Wakeup Registers page - the caller is now
    12219 	 * able to access registers on the Wakeup registers page.
   12220 	 */
   12221 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12222 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12223 }
   12224 
   12225 /*
   12226  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12227  *  @dev: pointer to the HW structure
    12228  *  @phy_regp: pointer to original contents of BM_WUC_ENABLE_REG
   12229  *
   12230  *  Restore BM_WUC_ENABLE_REG to its original value.
   12231  *
   12232  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12233  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12234  *  caller.
   12235  */
   12236 static int
   12237 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12238 {
   12239 #ifdef WM_DEBUG
   12240 	struct wm_softc *sc = device_private(dev);
   12241 #endif
   12242 
   12243 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12244 		device_xname(dev), __func__));
   12245 
   12246 	if (!phy_regp)
   12247 		return -1;
   12248 
   12249 	/* Select Port Control Registers page */
   12250 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12251 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12252 
   12253 	/* Restore 769.17 to its original value */
   12254 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12255 
   12256 	return 0;
   12257 }
   12258 
   12259 /*
   12260  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12261  *  @dev: pointer to the device
   12262  *  @offset: register offset to be read or written
   12263  *  @val: pointer to the data to read or write
   12264  *  @rd: determines if operation is read or write
   12265  *  @page_set: BM_WUC_PAGE already set and access enabled
   12266  *
   12267  *  Read the PHY register at offset and store the retrieved information in
   12268  *  data, or write data to PHY register at offset.  Note the procedure to
   12269  *  access the PHY wakeup registers is different than reading the other PHY
   12270  *  registers. It works as such:
   12271  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12272  *  2) Set page to 800 for host (801 for the manageability firmware)
   12273  *  3) Write the address using the address opcode (0x11)
   12274  *  4) Read or write the data using the data opcode (0x12)
   12275  *  5) Restore 769.17.2 to its original value
   12276  *
   12277  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12278  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12279  *
   12280  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12281  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    12282  *  is responsible for calls to wm_{enable,disable}_phy_wakeup_reg_access_bm()).
   12283  */
   12284 static int
    12285 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12286     bool page_set)
   12287 {
   12288 	struct wm_softc *sc = device_private(dev);
   12289 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12290 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12291 	uint16_t wuce;
   12292 	int rv = 0;
   12293 
   12294 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12295 		device_xname(dev), __func__));
   12296 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12297 	if ((sc->sc_type == WM_T_PCH)
   12298 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12299 		device_printf(dev,
   12300 		    "Attempting to access page %d while gig enabled.\n", page);
   12301 	}
   12302 
   12303 	if (!page_set) {
   12304 		/* Enable access to PHY wakeup registers */
   12305 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12306 		if (rv != 0) {
   12307 			device_printf(dev,
   12308 			    "%s: Could not enable PHY wakeup reg access\n",
   12309 			    __func__);
   12310 			return rv;
   12311 		}
   12312 	}
   12313 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12314 		device_xname(sc->sc_dev), __func__, page, regnum));
   12315 
   12316 	/*
    12317 	 * Access the PHY wakeup register: steps 3) and 4) of the
    12318 	 * procedure described in the comment above.
   12319 	 */
   12320 
   12321 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12322 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12323 	if (rv != 0)
   12324 		return rv;
   12325 
   12326 	if (rd) {
   12327 		/* Read the Wakeup register page value using opcode 0x12 */
   12328 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12329 	} else {
   12330 		/* Write the Wakeup register page value using opcode 0x12 */
   12331 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12332 	}
   12333 	if (rv != 0)
   12334 		return rv;
   12335 
   12336 	if (!page_set)
   12337 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12338 
   12339 	return rv;
   12340 }
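
/*
 * Illustrative sketch (not compiled in): reading a single wakeup register,
 * here register 1 of page 800 (BM_WUC_PAGE), with the offset composed by
 * hand the same way the BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() macros decompose
 * it.  The PHY semaphore must already be held.
 */
#if 0
	uint16_t val;
	int offset = (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | 1;
	rv = wm_access_phy_wakeup_reg_bm(dev, offset, &val, true, false);
#endif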
   12341 
   12342 /*
   12343  * wm_gmii_hv_readreg:	[mii interface function]
   12344  *
    12345  *	Read an HV (PCH and newer) PHY register.
   12346  * This could be handled by the PHY layer if we didn't have to lock the
   12347  * resource ...
   12348  */
   12349 static int
   12350 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12351 {
   12352 	struct wm_softc *sc = device_private(dev);
   12353 	int rv;
   12354 
   12355 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12356 		device_xname(dev), __func__));
   12357 
   12358 	rv = sc->phy.acquire(sc);
   12359 	if (rv != 0) {
   12360 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12361 		return rv;
   12362 	}
   12363 
   12364 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12365 	sc->phy.release(sc);
   12366 	return rv;
   12367 }
   12368 
   12369 static int
   12370 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12371 {
   12372 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12373 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12374 	int rv;
   12375 
   12376 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12377 
   12378 	/* Page 800 works differently than the rest so it has its own func */
   12379 	if (page == BM_WUC_PAGE)
   12380 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12381 
   12382 	/*
    12383 	 * Pages lower than 768 work differently than the rest and would
    12384 	 * need their own access function; they are not supported here.
   12385 	 */
   12386 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12387 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12388 		return -1;
   12389 	}
   12390 
   12391 	/*
   12392 	 * XXX I21[789] documents say that the SMBus Address register is at
   12393 	 * PHY address 01, Page 0 (not 768), Register 26.
   12394 	 */
   12395 	if (page == HV_INTC_FC_PAGE_START)
   12396 		page = 0;
   12397 
   12398 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12399 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12400 		    page << BME1000_PAGE_SHIFT);
   12401 		if (rv != 0)
   12402 			return rv;
   12403 	}
   12404 
   12405 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12406 }
   12407 
   12408 /*
   12409  * wm_gmii_hv_writereg:	[mii interface function]
   12410  *
    12411  *	Write an HV (PCH and newer) PHY register.
   12412  * This could be handled by the PHY layer if we didn't have to lock the
   12413  * resource ...
   12414  */
   12415 static int
   12416 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12417 {
   12418 	struct wm_softc *sc = device_private(dev);
   12419 	int rv;
   12420 
   12421 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12422 		device_xname(dev), __func__));
   12423 
   12424 	rv = sc->phy.acquire(sc);
   12425 	if (rv != 0) {
   12426 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12427 		return rv;
   12428 	}
   12429 
   12430 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12431 	sc->phy.release(sc);
   12432 
   12433 	return rv;
   12434 }
   12435 
   12436 static int
   12437 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12438 {
   12439 	struct wm_softc *sc = device_private(dev);
   12440 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12441 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12442 	int rv;
   12443 
   12444 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12445 
   12446 	/* Page 800 works differently than the rest so it has its own func */
   12447 	if (page == BM_WUC_PAGE)
   12448 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12449 		    false);
   12450 
   12451 	/*
    12452 	 * Pages below 768 (other than page 0) would need their own
    12453 	 * handling, which is not implemented; complain and fail.
   12454 	 */
   12455 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12456 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12457 		return -1;
   12458 	}
   12459 
    12461 	/*
    12462 	 * XXX I21[789] documents say that the SMBus Address register
    12463 	 * is at PHY address 01, Page 0 (not 768), Register 26.
    12464 	 */
    12465 	if (page == HV_INTC_FC_PAGE_START)
    12466 		page = 0;
    12467 
    12468 	/*
    12469 	 * XXX Workaround MDIO accesses being disabled after entering
    12470 	 * IEEE Power Down (whenever bit 11 of the PHY control
    12471 	 * register is set)
    12472 	 */
    12473 	if (sc->sc_phytype == WMPHY_82578) {
    12474 		struct mii_softc *child;
    12475 
    12476 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12477 		if ((child != NULL) && (child->mii_mpd_rev >= 1)
    12478 		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
    12479 		    && ((val & (1 << 11)) != 0)) {
    12480 			device_printf(dev, "XXX need workaround\n");
    12481 		}
    12482 	}
    12483 
    12484 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
    12485 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
    12486 		    page << BME1000_PAGE_SHIFT);
    12487 		if (rv != 0)
    12488 			return rv;
    12489 	}
   12491 
   12492 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12493 }
   12494 
   12495 /*
   12496  * wm_gmii_82580_readreg:	[mii interface function]
   12497  *
   12498  *	Read a PHY register on the 82580 and I350.
   12499  * This could be handled by the PHY layer if we didn't have to lock the
   12500  * resource ...
   12501  */
   12502 static int
   12503 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12504 {
   12505 	struct wm_softc *sc = device_private(dev);
   12506 	int rv;
   12507 
   12508 	rv = sc->phy.acquire(sc);
   12509 	if (rv != 0) {
   12510 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12511 		return rv;
   12512 	}
   12513 
   12514 #ifdef DIAGNOSTIC
   12515 	if (reg > MII_ADDRMASK) {
   12516 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12517 		    __func__, sc->sc_phytype, reg);
   12518 		reg &= MII_ADDRMASK;
   12519 	}
   12520 #endif
   12521 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12522 
   12523 	sc->phy.release(sc);
   12524 	return rv;
   12525 }
   12526 
   12527 /*
   12528  * wm_gmii_82580_writereg:	[mii interface function]
   12529  *
   12530  *	Write a PHY register on the 82580 and I350.
   12531  * This could be handled by the PHY layer if we didn't have to lock the
   12532  * resource ...
   12533  */
   12534 static int
   12535 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12536 {
   12537 	struct wm_softc *sc = device_private(dev);
   12538 	int rv;
   12539 
   12540 	rv = sc->phy.acquire(sc);
   12541 	if (rv != 0) {
   12542 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12543 		return rv;
   12544 	}
   12545 
   12546 #ifdef DIAGNOSTIC
   12547 	if (reg > MII_ADDRMASK) {
   12548 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12549 		    __func__, sc->sc_phytype, reg);
   12550 		reg &= MII_ADDRMASK;
   12551 	}
   12552 #endif
   12553 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12554 
   12555 	sc->phy.release(sc);
   12556 	return rv;
   12557 }
   12558 
   12559 /*
   12560  * wm_gmii_gs40g_readreg:	[mii interface function]
   12561  *
    12562  *	Read a PHY register on the I210 and I211.
   12563  * This could be handled by the PHY layer if we didn't have to lock the
   12564  * resource ...
   12565  */
   12566 static int
   12567 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12568 {
   12569 	struct wm_softc *sc = device_private(dev);
   12570 	int page, offset;
   12571 	int rv;
   12572 
   12573 	/* Acquire semaphore */
   12574 	rv = sc->phy.acquire(sc);
   12575 	if (rv != 0) {
   12576 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12577 		return rv;
   12578 	}
   12579 
   12580 	/* Page select */
   12581 	page = reg >> GS40G_PAGE_SHIFT;
   12582 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12583 	if (rv != 0)
   12584 		goto release;
   12585 
   12586 	/* Read reg */
   12587 	offset = reg & GS40G_OFFSET_MASK;
   12588 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12589 
   12590 release:
   12591 	sc->phy.release(sc);
   12592 	return rv;
   12593 }
   12594 
   12595 /*
   12596  * wm_gmii_gs40g_writereg:	[mii interface function]
   12597  *
   12598  *	Write a PHY register on the I210 and I211.
   12599  * This could be handled by the PHY layer if we didn't have to lock the
   12600  * resource ...
   12601  */
   12602 static int
   12603 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12604 {
   12605 	struct wm_softc *sc = device_private(dev);
   12606 	uint16_t page;
   12607 	int offset, rv;
   12608 
   12609 	/* Acquire semaphore */
   12610 	rv = sc->phy.acquire(sc);
   12611 	if (rv != 0) {
   12612 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12613 		return rv;
   12614 	}
   12615 
   12616 	/* Page select */
   12617 	page = reg >> GS40G_PAGE_SHIFT;
   12618 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12619 	if (rv != 0)
   12620 		goto release;
   12621 
   12622 	/* Write reg */
   12623 	offset = reg & GS40G_OFFSET_MASK;
   12624 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12625 
   12626 release:
   12627 	/* Release semaphore */
   12628 	sc->phy.release(sc);
   12629 	return rv;
   12630 }
   12631 
   12632 /*
   12633  * wm_gmii_statchg:	[mii interface function]
   12634  *
   12635  *	Callback from MII layer when media changes.
   12636  */
   12637 static void
   12638 wm_gmii_statchg(struct ifnet *ifp)
   12639 {
   12640 	struct wm_softc *sc = ifp->if_softc;
   12641 	struct mii_data *mii = &sc->sc_mii;
   12642 
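          	/*
          	 * Clear the flow control and collision distance settings;
          	 * they are recomputed below from the link and negotiation
          	 * results.
          	 */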
   12643 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12644 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12645 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12646 
   12647 	/* Get flow control negotiation result. */
   12648 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12649 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12650 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12651 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12652 	}
   12653 
   12654 	if (sc->sc_flowflags & IFM_FLOW) {
   12655 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12656 			sc->sc_ctrl |= CTRL_TFCE;
   12657 			sc->sc_fcrtl |= FCRTL_XONE;
   12658 		}
   12659 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12660 			sc->sc_ctrl |= CTRL_RFCE;
   12661 	}
   12662 
   12663 	if (mii->mii_media_active & IFM_FDX) {
   12664 		DPRINTF(sc, WM_DEBUG_LINK,
   12665 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12666 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12667 	} else {
   12668 		DPRINTF(sc, WM_DEBUG_LINK,
   12669 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12670 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12671 	}
   12672 
   12673 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12674 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12675 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12676 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12677 	if (sc->sc_type == WM_T_80003) {
   12678 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12679 		case IFM_1000_T:
   12680 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12681 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12682 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12683 			break;
   12684 		default:
   12685 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12686 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12687 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12688 			break;
   12689 		}
   12690 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12691 	}
   12692 }
   12693 
   12694 /* kumeran related (80003, ICH* and PCH*) */
   12695 
   12696 /*
   12697  * wm_kmrn_readreg:
   12698  *
   12699  *	Read a kumeran register
   12700  */
   12701 static int
   12702 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12703 {
   12704 	int rv;
   12705 
   12706 	if (sc->sc_type == WM_T_80003)
   12707 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12708 	else
   12709 		rv = sc->phy.acquire(sc);
   12710 	if (rv != 0) {
   12711 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12712 		    __func__);
   12713 		return rv;
   12714 	}
   12715 
   12716 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12717 
   12718 	if (sc->sc_type == WM_T_80003)
   12719 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12720 	else
   12721 		sc->phy.release(sc);
   12722 
   12723 	return rv;
   12724 }
   12725 
   12726 static int
   12727 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12728 {
   12729 
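          	/*
          	 * Writing the register offset with the Read Enable (REN) bit
          	 * set starts a read cycle; after a short delay the result is
          	 * in the low 16 bits of KUMCTRLSTA.
          	 */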
   12730 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12731 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12732 	    KUMCTRLSTA_REN);
   12733 	CSR_WRITE_FLUSH(sc);
   12734 	delay(2);
   12735 
   12736 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12737 
   12738 	return 0;
   12739 }
   12740 
   12741 /*
   12742  * wm_kmrn_writereg:
   12743  *
   12744  *	Write a kumeran register
   12745  */
   12746 static int
   12747 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12748 {
   12749 	int rv;
   12750 
   12751 	if (sc->sc_type == WM_T_80003)
   12752 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12753 	else
   12754 		rv = sc->phy.acquire(sc);
   12755 	if (rv != 0) {
   12756 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12757 		    __func__);
   12758 		return rv;
   12759 	}
   12760 
   12761 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12762 
   12763 	if (sc->sc_type == WM_T_80003)
   12764 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12765 	else
   12766 		sc->phy.release(sc);
   12767 
   12768 	return rv;
   12769 }
   12770 
   12771 static int
   12772 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12773 {
   12774 
   12775 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12776 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12777 
   12778 	return 0;
   12779 }
   12780 
   12781 /*
    12782  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   12783  * This access method is different from IEEE MMD.
   12784  */
   12785 static int
   12786 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12787 {
   12788 	struct wm_softc *sc = device_private(dev);
   12789 	int rv;
   12790 
   12791 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12792 	if (rv != 0)
   12793 		return rv;
   12794 
   12795 	if (rd)
   12796 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12797 	else
   12798 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12799 	return rv;
   12800 }
   12801 
   12802 static int
   12803 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12804 {
   12805 
   12806 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12807 }
   12808 
   12809 static int
   12810 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12811 {
   12812 
   12813 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12814 }
   12815 
   12816 /* SGMII related */
   12817 
   12818 /*
   12819  * wm_sgmii_uses_mdio
   12820  *
   12821  * Check whether the transaction is to the internal PHY or the external
   12822  * MDIO interface. Return true if it's MDIO.
   12823  */
   12824 static bool
   12825 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12826 {
   12827 	uint32_t reg;
   12828 	bool ismdio = false;
   12829 
   12830 	switch (sc->sc_type) {
   12831 	case WM_T_82575:
   12832 	case WM_T_82576:
   12833 		reg = CSR_READ(sc, WMREG_MDIC);
   12834 		ismdio = ((reg & MDIC_DEST) != 0);
   12835 		break;
   12836 	case WM_T_82580:
   12837 	case WM_T_I350:
   12838 	case WM_T_I354:
   12839 	case WM_T_I210:
   12840 	case WM_T_I211:
   12841 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12842 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12843 		break;
   12844 	default:
   12845 		break;
   12846 	}
   12847 
   12848 	return ismdio;
   12849 }
   12850 
   12851 /* Setup internal SGMII PHY for SFP */
   12852 static void
   12853 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12854 {
   12855 	uint16_t id1, id2, phyreg;
   12856 	int i, rv;
   12857 
   12858 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12859 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12860 		return;
   12861 
   12862 	for (i = 0; i < MII_NPHY; i++) {
   12863 		sc->phy.no_errprint = true;
   12864 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12865 		if (rv != 0)
   12866 			continue;
   12867 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12868 		if (rv != 0)
   12869 			continue;
   12870 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12871 			continue;
   12872 		sc->phy.no_errprint = false;
   12873 
   12874 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12875 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12876 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12877 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12878 		break;
   12879 	}
    12881 }
   12882 
   12883 /*
   12884  * wm_sgmii_readreg:	[mii interface function]
   12885  *
   12886  *	Read a PHY register on the SGMII
   12887  * This could be handled by the PHY layer if we didn't have to lock the
   12888  * resource ...
   12889  */
   12890 static int
   12891 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12892 {
   12893 	struct wm_softc *sc = device_private(dev);
   12894 	int rv;
   12895 
   12896 	rv = sc->phy.acquire(sc);
   12897 	if (rv != 0) {
   12898 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12899 		return rv;
   12900 	}
   12901 
   12902 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12903 
   12904 	sc->phy.release(sc);
   12905 	return rv;
   12906 }
   12907 
   12908 static int
   12909 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12910 {
   12911 	struct wm_softc *sc = device_private(dev);
   12912 	uint32_t i2ccmd;
   12913 	int i, rv = 0;
   12914 
   12915 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12916 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12917 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12918 
   12919 	/* Poll the ready bit */
   12920 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12921 		delay(50);
   12922 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12923 		if (i2ccmd & I2CCMD_READY)
   12924 			break;
   12925 	}
   12926 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12927 		device_printf(dev, "I2CCMD Read did not complete\n");
   12928 		rv = ETIMEDOUT;
   12929 	}
   12930 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12931 		if (!sc->phy.no_errprint)
   12932 			device_printf(dev, "I2CCMD Error bit set\n");
   12933 		rv = EIO;
   12934 	}
   12935 
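          	/* The I2C interface returns the two data bytes MSB first; swap them. */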
   12936 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12937 
   12938 	return rv;
   12939 }
   12940 
   12941 /*
   12942  * wm_sgmii_writereg:	[mii interface function]
   12943  *
   12944  *	Write a PHY register on the SGMII.
   12945  * This could be handled by the PHY layer if we didn't have to lock the
   12946  * resource ...
   12947  */
   12948 static int
   12949 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12950 {
   12951 	struct wm_softc *sc = device_private(dev);
   12952 	int rv;
   12953 
   12954 	rv = sc->phy.acquire(sc);
   12955 	if (rv != 0) {
   12956 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12957 		return rv;
   12958 	}
   12959 
   12960 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12961 
   12962 	sc->phy.release(sc);
   12963 
   12964 	return rv;
   12965 }
   12966 
   12967 static int
   12968 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12969 {
   12970 	struct wm_softc *sc = device_private(dev);
   12971 	uint32_t i2ccmd;
   12972 	uint16_t swapdata;
   12973 	int rv = 0;
   12974 	int i;
   12975 
   12976 	/* Swap the data bytes for the I2C interface */
   12977 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12978 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12979 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12980 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12981 
   12982 	/* Poll the ready bit */
   12983 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12984 		delay(50);
   12985 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12986 		if (i2ccmd & I2CCMD_READY)
   12987 			break;
   12988 	}
   12989 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12990 		device_printf(dev, "I2CCMD Write did not complete\n");
   12991 		rv = ETIMEDOUT;
   12992 	}
   12993 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12994 		device_printf(dev, "I2CCMD Error bit set\n");
   12995 		rv = EIO;
   12996 	}
   12997 
   12998 	return rv;
   12999 }
   13000 
   13001 /* TBI related */
   13002 
   13003 static bool
   13004 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   13005 {
   13006 	bool sig;
   13007 
   13008 	sig = ctrl & CTRL_SWDPIN(1);
   13009 
   13010 	/*
   13011 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   13012 	 * detect a signal, 1 if they don't.
   13013 	 */
   13014 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   13015 		sig = !sig;
   13016 
   13017 	return sig;
   13018 }
   13019 
   13020 /*
   13021  * wm_tbi_mediainit:
   13022  *
   13023  *	Initialize media for use on 1000BASE-X devices.
   13024  */
   13025 static void
   13026 wm_tbi_mediainit(struct wm_softc *sc)
   13027 {
   13028 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13029 	const char *sep = "";
   13030 
   13031 	if (sc->sc_type < WM_T_82543)
   13032 		sc->sc_tipg = TIPG_WM_DFLT;
   13033 	else
   13034 		sc->sc_tipg = TIPG_LG_DFLT;
   13035 
   13036 	sc->sc_tbi_serdes_anegticks = 5;
   13037 
   13038 	/* Initialize our media structures */
   13039 	sc->sc_mii.mii_ifp = ifp;
   13040 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   13041 
   13042 	ifp->if_baudrate = IF_Gbps(1);
   13043 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   13044 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13045 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13046 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   13047 		    sc->sc_core_lock);
   13048 	} else {
   13049 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   13050 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   13051 	}
   13052 
   13053 	/*
   13054 	 * SWD Pins:
   13055 	 *
   13056 	 *	0 = Link LED (output)
   13057 	 *	1 = Loss Of Signal (input)
   13058 	 */
   13059 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   13060 
   13061 	/* XXX Perhaps this is only for TBI */
   13062 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13063 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   13064 
   13065 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   13066 		sc->sc_ctrl &= ~CTRL_LRST;
   13067 
   13068 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13069 
   13070 #define	ADD(ss, mm, dd)							  \
   13071 do {									  \
   13072 	aprint_normal("%s%s", sep, ss);					  \
   13073 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   13074 	sep = ", ";							  \
   13075 } while (/*CONSTCOND*/0)
   13076 
   13077 	aprint_normal_dev(sc->sc_dev, "");
   13078 
   13079 	if (sc->sc_type == WM_T_I354) {
   13080 		uint32_t status;
   13081 
   13082 		status = CSR_READ(sc, WMREG_STATUS);
   13083 		if (((status & STATUS_2P5_SKU) != 0)
   13084 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    13085 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,
          			    ANAR_X_FD);
    13086 		} else
    13087 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,
          			    ANAR_X_FD);
   13088 	} else if (sc->sc_type == WM_T_82545) {
   13089 		/* Only 82545 is LX (XXX except SFP) */
   13090 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13091 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13092 	} else if (sc->sc_sfptype != 0) {
   13093 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   13094 		switch (sc->sc_sfptype) {
   13095 		default:
   13096 		case SFF_SFP_ETH_FLAGS_1000SX:
   13097 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13098 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13099 			break;
   13100 		case SFF_SFP_ETH_FLAGS_1000LX:
   13101 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   13102 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   13103 			break;
   13104 		case SFF_SFP_ETH_FLAGS_1000CX:
   13105 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   13106 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   13107 			break;
   13108 		case SFF_SFP_ETH_FLAGS_1000T:
   13109 			ADD("1000baseT", IFM_1000_T, 0);
   13110 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   13111 			break;
   13112 		case SFF_SFP_ETH_FLAGS_100FX:
   13113 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   13114 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   13115 			break;
   13116 		}
   13117 	} else {
   13118 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   13119 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   13120 	}
   13121 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   13122 	aprint_normal("\n");
   13123 
   13124 #undef ADD
   13125 
   13126 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   13127 }
   13128 
   13129 /*
   13130  * wm_tbi_mediachange:	[ifmedia interface function]
   13131  *
   13132  *	Set hardware to newly-selected media on a 1000BASE-X device.
   13133  */
   13134 static int
   13135 wm_tbi_mediachange(struct ifnet *ifp)
   13136 {
   13137 	struct wm_softc *sc = ifp->if_softc;
   13138 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13139 	uint32_t status, ctrl;
   13140 	bool signal;
   13141 	int i;
   13142 
   13143 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   13144 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13145 		/* XXX need some work for >= 82571 and < 82575 */
   13146 		if (sc->sc_type < WM_T_82575)
   13147 			return 0;
   13148 	}
   13149 
   13150 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13151 	    || (sc->sc_type >= WM_T_82575))
   13152 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13153 
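          	/*
          	 * Build the transmit config word: always advertise
          	 * autonegotiation, plus the duplex bits selected by the
          	 * current media; pause bits are added below if flow control
          	 * is enabled.
          	 */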
   13154 	sc->sc_ctrl &= ~CTRL_LRST;
   13155 	sc->sc_txcw = TXCW_ANE;
   13156 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13157 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   13158 	else if (ife->ifm_media & IFM_FDX)
   13159 		sc->sc_txcw |= TXCW_FD;
   13160 	else
   13161 		sc->sc_txcw |= TXCW_HD;
   13162 
   13163 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   13164 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   13165 
    13166 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   13167 		device_xname(sc->sc_dev), sc->sc_txcw));
   13168 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13169 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13170 	CSR_WRITE_FLUSH(sc);
   13171 	delay(1000);
   13172 
   13173 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13174 	signal = wm_tbi_havesignal(sc, ctrl);
   13175 
   13176 	DPRINTF(sc, WM_DEBUG_LINK,
   13177 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   13178 
   13179 	if (signal) {
   13180 		/* Have signal; wait for the link to come up. */
   13181 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   13182 			delay(10000);
   13183 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   13184 				break;
   13185 		}
   13186 
   13187 		DPRINTF(sc, WM_DEBUG_LINK,
   13188 		    ("%s: i = %d after waiting for link\n",
   13189 			device_xname(sc->sc_dev), i));
   13190 
   13191 		status = CSR_READ(sc, WMREG_STATUS);
   13192 		DPRINTF(sc, WM_DEBUG_LINK,
   13193 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   13194 			__PRIxBIT "\n",
   13195 			device_xname(sc->sc_dev), status, STATUS_LU));
   13196 		if (status & STATUS_LU) {
   13197 			/* Link is up. */
   13198 			DPRINTF(sc, WM_DEBUG_LINK,
   13199 			    ("%s: LINK: set media -> link up %s\n",
   13200 				device_xname(sc->sc_dev),
   13201 				(status & STATUS_FD) ? "FDX" : "HDX"));
   13202 
   13203 			/*
    13204 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    13205 			 * automatically, so update sc->sc_ctrl from the register.
   13206 			 */
   13207 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   13208 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   13209 			sc->sc_fcrtl &= ~FCRTL_XONE;
   13210 			if (status & STATUS_FD)
   13211 				sc->sc_tctl |=
   13212 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   13213 			else
   13214 				sc->sc_tctl |=
   13215 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   13216 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   13217 				sc->sc_fcrtl |= FCRTL_XONE;
   13218 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13219 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13220 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13221 			sc->sc_tbi_linkup = 1;
   13222 		} else {
   13223 			if (i == WM_LINKUP_TIMEOUT)
   13224 				wm_check_for_link(sc);
   13225 			/* Link is down. */
   13226 			DPRINTF(sc, WM_DEBUG_LINK,
   13227 			    ("%s: LINK: set media -> link down\n",
   13228 				device_xname(sc->sc_dev)));
   13229 			sc->sc_tbi_linkup = 0;
   13230 		}
   13231 	} else {
   13232 		DPRINTF(sc, WM_DEBUG_LINK,
   13233 		    ("%s: LINK: set media -> no signal\n",
   13234 			device_xname(sc->sc_dev)));
   13235 		sc->sc_tbi_linkup = 0;
   13236 	}
   13237 
   13238 	wm_tbi_serdes_set_linkled(sc);
   13239 
   13240 	return 0;
   13241 }
   13242 
   13243 /*
   13244  * wm_tbi_mediastatus:	[ifmedia interface function]
   13245  *
   13246  *	Get the current interface media status on a 1000BASE-X device.
   13247  */
   13248 static void
   13249 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13250 {
   13251 	struct wm_softc *sc = ifp->if_softc;
   13252 	uint32_t ctrl, status;
   13253 
   13254 	ifmr->ifm_status = IFM_AVALID;
   13255 	ifmr->ifm_active = IFM_ETHER;
   13256 
   13257 	status = CSR_READ(sc, WMREG_STATUS);
   13258 	if ((status & STATUS_LU) == 0) {
   13259 		ifmr->ifm_active |= IFM_NONE;
   13260 		return;
   13261 	}
   13262 
   13263 	ifmr->ifm_status |= IFM_ACTIVE;
   13264 	/* Only 82545 is LX */
   13265 	if (sc->sc_type == WM_T_82545)
   13266 		ifmr->ifm_active |= IFM_1000_LX;
   13267 	else
   13268 		ifmr->ifm_active |= IFM_1000_SX;
   13269 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13270 		ifmr->ifm_active |= IFM_FDX;
   13271 	else
   13272 		ifmr->ifm_active |= IFM_HDX;
   13273 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13274 	if (ctrl & CTRL_RFCE)
   13275 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13276 	if (ctrl & CTRL_TFCE)
   13277 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13278 }
   13279 
   13280 /* XXX TBI only */
   13281 static int
   13282 wm_check_for_link(struct wm_softc *sc)
   13283 {
   13284 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13285 	uint32_t rxcw;
   13286 	uint32_t ctrl;
   13287 	uint32_t status;
   13288 	bool signal;
   13289 
   13290 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13291 		device_xname(sc->sc_dev), __func__));
   13292 
   13293 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13294 		/* XXX need some work for >= 82571 */
   13295 		if (sc->sc_type >= WM_T_82571) {
   13296 			sc->sc_tbi_linkup = 1;
   13297 			return 0;
   13298 		}
   13299 	}
   13300 
   13301 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13302 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13303 	status = CSR_READ(sc, WMREG_STATUS);
   13304 	signal = wm_tbi_havesignal(sc, ctrl);
   13305 
   13306 	DPRINTF(sc, WM_DEBUG_LINK,
   13307 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13308 		device_xname(sc->sc_dev), __func__, signal,
   13309 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13310 
   13311 	/*
   13312 	 * SWDPIN   LU RXCW
   13313 	 *	0    0	  0
   13314 	 *	0    0	  1	(should not happen)
   13315 	 *	0    1	  0	(should not happen)
   13316 	 *	0    1	  1	(should not happen)
   13317 	 *	1    0	  0	Disable autonego and force linkup
   13318 	 *	1    0	  1	got /C/ but not linkup yet
   13319 	 *	1    1	  0	(linkup)
   13320 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13321 	 *
   13322 	 */
   13323 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13324 		DPRINTF(sc, WM_DEBUG_LINK,
   13325 		    ("%s: %s: force linkup and fullduplex\n",
   13326 			device_xname(sc->sc_dev), __func__));
   13327 		sc->sc_tbi_linkup = 0;
   13328 		/* Disable auto-negotiation in the TXCW register */
   13329 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13330 
   13331 		/*
   13332 		 * Force link-up and also force full-duplex.
   13333 		 *
    13334 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    13335 		 * automatically, so base sc->sc_ctrl on the value just read.
   13336 		 */
   13337 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13338 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13339 	} else if (((status & STATUS_LU) != 0)
   13340 	    && ((rxcw & RXCW_C) != 0)
   13341 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13342 		sc->sc_tbi_linkup = 1;
   13343 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13344 			device_xname(sc->sc_dev), __func__));
   13345 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13346 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13347 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13348 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13349 			device_xname(sc->sc_dev), __func__));
   13350 	} else {
   13351 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13352 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13353 			status));
   13354 	}
   13355 
   13356 	return 0;
   13357 }
   13358 
   13359 /*
   13360  * wm_tbi_tick:
   13361  *
   13362  *	Check the link on TBI devices.
   13363  *	This function acts as mii_tick().
   13364  */
   13365 static void
   13366 wm_tbi_tick(struct wm_softc *sc)
   13367 {
   13368 	struct mii_data *mii = &sc->sc_mii;
   13369 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13370 	uint32_t status;
   13371 
   13372 	KASSERT(mutex_owned(sc->sc_core_lock));
   13373 
   13374 	status = CSR_READ(sc, WMREG_STATUS);
   13375 
   13376 	/* XXX is this needed? */
   13377 	(void)CSR_READ(sc, WMREG_RXCW);
   13378 	(void)CSR_READ(sc, WMREG_CTRL);
   13379 
   13380 	/* set link status */
   13381 	if ((status & STATUS_LU) == 0) {
   13382 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13383 			device_xname(sc->sc_dev)));
   13384 		sc->sc_tbi_linkup = 0;
   13385 	} else if (sc->sc_tbi_linkup == 0) {
   13386 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13387 			device_xname(sc->sc_dev),
   13388 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13389 		sc->sc_tbi_linkup = 1;
   13390 		sc->sc_tbi_serdes_ticks = 0;
   13391 	}
   13392 
   13393 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13394 		goto setled;
   13395 
   13396 	if ((status & STATUS_LU) == 0) {
   13397 		sc->sc_tbi_linkup = 0;
   13398 		/* If the timer expired, retry autonegotiation */
   13399 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13400 		    && (++sc->sc_tbi_serdes_ticks
   13401 			>= sc->sc_tbi_serdes_anegticks)) {
   13402 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13403 				device_xname(sc->sc_dev), __func__));
   13404 			sc->sc_tbi_serdes_ticks = 0;
   13405 			/*
   13406 			 * Reset the link, and let autonegotiation do
   13407 			 * its thing
   13408 			 */
   13409 			sc->sc_ctrl |= CTRL_LRST;
   13410 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13411 			CSR_WRITE_FLUSH(sc);
   13412 			delay(1000);
   13413 			sc->sc_ctrl &= ~CTRL_LRST;
   13414 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13415 			CSR_WRITE_FLUSH(sc);
   13416 			delay(1000);
   13417 			CSR_WRITE(sc, WMREG_TXCW,
   13418 			    sc->sc_txcw & ~TXCW_ANE);
   13419 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13420 		}
   13421 	}
   13422 
   13423 setled:
   13424 	wm_tbi_serdes_set_linkled(sc);
   13425 }
   13426 
   13427 /* SERDES related */
   13428 static void
   13429 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13430 {
   13431 	uint32_t reg;
   13432 
   13433 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13434 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13435 		return;
   13436 
   13437 	/* Enable PCS to turn on link */
   13438 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13439 	reg |= PCS_CFG_PCS_EN;
   13440 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13441 
   13442 	/* Power up the laser */
   13443 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13444 	reg &= ~CTRL_EXT_SWDPIN(3);
   13445 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13446 
   13447 	/* Flush the write to verify completion */
   13448 	CSR_WRITE_FLUSH(sc);
   13449 	delay(1000);
   13450 }
   13451 
   13452 static int
   13453 wm_serdes_mediachange(struct ifnet *ifp)
   13454 {
   13455 	struct wm_softc *sc = ifp->if_softc;
   13456 	bool pcs_autoneg = true; /* XXX */
   13457 	uint32_t ctrl_ext, pcs_lctl, reg;
   13458 
   13459 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13460 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13461 		return 0;
   13462 
   13463 	/* XXX Currently, this function is not called on 8257[12] */
   13464 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13465 	    || (sc->sc_type >= WM_T_82575))
   13466 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13467 
   13468 	/* Power on the sfp cage if present */
   13469 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13470 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13471 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13472 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13473 
   13474 	sc->sc_ctrl |= CTRL_SLU;
   13475 
   13476 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13477 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13478 
   13479 		reg = CSR_READ(sc, WMREG_CONNSW);
   13480 		reg |= CONNSW_ENRGSRC;
   13481 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13482 	}
   13483 
   13484 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13485 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13486 	case CTRL_EXT_LINK_MODE_SGMII:
   13487 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13488 		pcs_autoneg = true;
   13489 		/* Autoneg time out should be disabled for SGMII mode */
   13490 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13491 		break;
   13492 	case CTRL_EXT_LINK_MODE_1000KX:
   13493 		pcs_autoneg = false;
   13494 		/* FALLTHROUGH */
   13495 	default:
   13496 		if ((sc->sc_type == WM_T_82575)
   13497 		    || (sc->sc_type == WM_T_82576)) {
   13498 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13499 				pcs_autoneg = false;
   13500 		}
   13501 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13502 		    | CTRL_FRCFDX;
   13503 
   13504 		/* Set speed of 1000/Full if speed/duplex is forced */
   13505 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13506 	}
   13507 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13508 
   13509 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13510 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13511 
   13512 	if (pcs_autoneg) {
   13513 		/* Set PCS register for autoneg */
   13514 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13515 
   13516 		/* Disable force flow control for autoneg */
   13517 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13518 
   13519 		/* Configure flow control advertisement for autoneg */
   13520 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13521 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13522 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13523 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13524 	} else
   13525 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13526 
   13527 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13528 
   13529 	return 0;
   13530 }
   13531 
   13532 static void
   13533 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13534 {
   13535 	struct wm_softc *sc = ifp->if_softc;
   13536 	struct mii_data *mii = &sc->sc_mii;
   13537 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13538 	uint32_t pcs_adv, pcs_lpab, reg;
   13539 
   13540 	ifmr->ifm_status = IFM_AVALID;
   13541 	ifmr->ifm_active = IFM_ETHER;
   13542 
   13543 	/* Check PCS */
   13544 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13545 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13546 		ifmr->ifm_active |= IFM_NONE;
   13547 		sc->sc_tbi_linkup = 0;
   13548 		goto setled;
   13549 	}
   13550 
   13551 	sc->sc_tbi_linkup = 1;
   13552 	ifmr->ifm_status |= IFM_ACTIVE;
   13553 	if (sc->sc_type == WM_T_I354) {
   13554 		uint32_t status;
   13555 
   13556 		status = CSR_READ(sc, WMREG_STATUS);
   13557 		if (((status & STATUS_2P5_SKU) != 0)
   13558 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13559 			ifmr->ifm_active |= IFM_2500_KX;
   13560 		} else
   13561 			ifmr->ifm_active |= IFM_1000_KX;
   13562 	} else {
   13563 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13564 		case PCS_LSTS_SPEED_10:
   13565 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13566 			break;
   13567 		case PCS_LSTS_SPEED_100:
   13568 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13569 			break;
   13570 		case PCS_LSTS_SPEED_1000:
   13571 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13572 			break;
   13573 		default:
   13574 			device_printf(sc->sc_dev, "Unknown speed\n");
   13575 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13576 			break;
   13577 		}
   13578 	}
   13579 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13580 	if ((reg & PCS_LSTS_FDX) != 0)
   13581 		ifmr->ifm_active |= IFM_FDX;
   13582 	else
   13583 		ifmr->ifm_active |= IFM_HDX;
   13584 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13585 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13586 		/* Check flow */
   13587 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13588 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13589 			DPRINTF(sc, WM_DEBUG_LINK,
   13590 			    ("XXX LINKOK but not ACOMP\n"));
   13591 			goto setled;
   13592 		}
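          		/*
          		 * Resolve flow control as in 802.3 Annex 28B: symmetric
          		 * pause on both sides enables both directions, while the
          		 * asymmetric combinations below select TX-only or
          		 * RX-only pause.
          		 */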
   13593 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13594 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13595 		DPRINTF(sc, WM_DEBUG_LINK,
   13596 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   13597 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13598 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13599 			mii->mii_media_active |= IFM_FLOW
   13600 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13601 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13602 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13603 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13604 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13605 			mii->mii_media_active |= IFM_FLOW
   13606 			    | IFM_ETH_TXPAUSE;
   13607 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13608 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13609 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13610 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13611 			mii->mii_media_active |= IFM_FLOW
   13612 			    | IFM_ETH_RXPAUSE;
   13613 		}
   13614 	}
   13615 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13616 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13617 setled:
   13618 	wm_tbi_serdes_set_linkled(sc);
   13619 }
   13620 
   13621 /*
   13622  * wm_serdes_tick:
   13623  *
   13624  *	Check the link on serdes devices.
   13625  */
   13626 static void
   13627 wm_serdes_tick(struct wm_softc *sc)
   13628 {
   13629 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13630 	struct mii_data *mii = &sc->sc_mii;
   13631 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13632 	uint32_t reg;
   13633 
   13634 	KASSERT(mutex_owned(sc->sc_core_lock));
   13635 
   13636 	mii->mii_media_status = IFM_AVALID;
   13637 	mii->mii_media_active = IFM_ETHER;
   13638 
   13639 	/* Check PCS */
   13640 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13641 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13642 		mii->mii_media_status |= IFM_ACTIVE;
   13643 		sc->sc_tbi_linkup = 1;
   13644 		sc->sc_tbi_serdes_ticks = 0;
   13645 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13646 		if ((reg & PCS_LSTS_FDX) != 0)
   13647 			mii->mii_media_active |= IFM_FDX;
   13648 		else
   13649 			mii->mii_media_active |= IFM_HDX;
   13650 	} else {
    13651 		mii->mii_media_active |= IFM_NONE;
   13652 		sc->sc_tbi_linkup = 0;
   13653 		/* If the timer expired, retry autonegotiation */
   13654 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13655 		    && (++sc->sc_tbi_serdes_ticks
   13656 			>= sc->sc_tbi_serdes_anegticks)) {
   13657 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13658 				device_xname(sc->sc_dev), __func__));
   13659 			sc->sc_tbi_serdes_ticks = 0;
   13660 			/* XXX */
   13661 			wm_serdes_mediachange(ifp);
   13662 		}
   13663 	}
   13664 
   13665 	wm_tbi_serdes_set_linkled(sc);
   13666 }
   13667 
   13668 /* SFP related */
   13669 
   13670 static int
   13671 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13672 {
   13673 	uint32_t i2ccmd;
   13674 	int i;
   13675 
   13676 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13677 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13678 
   13679 	/* Poll the ready bit */
   13680 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13681 		delay(50);
   13682 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13683 		if (i2ccmd & I2CCMD_READY)
   13684 			break;
   13685 	}
   13686 	if ((i2ccmd & I2CCMD_READY) == 0)
   13687 		return -1;
   13688 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13689 		return -1;
   13690 
   13691 	*data = i2ccmd & 0x00ff;
   13692 
   13693 	return 0;
   13694 }
   13695 
   13696 static uint32_t
   13697 wm_sfp_get_media_type(struct wm_softc *sc)
   13698 {
   13699 	uint32_t ctrl_ext;
   13700 	uint8_t val = 0;
   13701 	int timeout = 3;
   13702 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13703 	int rv = -1;
   13704 
   13705 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13706 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13707 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13708 	CSR_WRITE_FLUSH(sc);
   13709 
   13710 	/* Read SFP module data */
   13711 	while (timeout) {
   13712 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13713 		if (rv == 0)
   13714 			break;
   13715 		delay(100*1000); /* XXX too big */
   13716 		timeout--;
   13717 	}
   13718 	if (rv != 0)
   13719 		goto out;
   13720 
   13721 	switch (val) {
   13722 	case SFF_SFP_ID_SFF:
   13723 		aprint_normal_dev(sc->sc_dev,
   13724 		    "Module/Connector soldered to board\n");
   13725 		break;
   13726 	case SFF_SFP_ID_SFP:
   13727 		sc->sc_flags |= WM_F_SFP;
   13728 		break;
   13729 	case SFF_SFP_ID_UNKNOWN:
   13730 		goto out;
   13731 	default:
   13732 		break;
   13733 	}
   13734 
   13735 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13736 	if (rv != 0)
   13737 		goto out;
   13738 
   13739 	sc->sc_sfptype = val;
   13740 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13741 		mediatype = WM_MEDIATYPE_SERDES;
   13742 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13743 		sc->sc_flags |= WM_F_SGMII;
   13744 		mediatype = WM_MEDIATYPE_COPPER;
   13745 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13746 		sc->sc_flags |= WM_F_SGMII;
   13747 		mediatype = WM_MEDIATYPE_SERDES;
   13748 	} else {
   13749 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13750 		    __func__, sc->sc_sfptype);
   13751 		sc->sc_sfptype = 0; /* XXX unknown */
   13752 	}
   13753 
   13754 out:
   13755 	/* Restore I2C interface setting */
   13756 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13757 
   13758 	return mediatype;
   13759 }
   13760 
   13761 /*
   13762  * NVM related.
   13763  * Microwire, SPI (w/wo EERD) and Flash.
   13764  */
   13765 
   13766 /* Both spi and uwire */
   13767 
   13768 /*
   13769  * wm_eeprom_sendbits:
   13770  *
   13771  *	Send a series of bits to the EEPROM.
   13772  */
   13773 static void
   13774 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13775 {
   13776 	uint32_t reg;
   13777 	int x;
   13778 
   13779 	reg = CSR_READ(sc, WMREG_EECD);
   13780 
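          	/*
          	 * Shift the bits out MSB first: present each bit on DI, then
          	 * pulse the clock (SK) high and low with 2us delays.
          	 */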
   13781 	for (x = nbits; x > 0; x--) {
   13782 		if (bits & (1U << (x - 1)))
   13783 			reg |= EECD_DI;
   13784 		else
   13785 			reg &= ~EECD_DI;
   13786 		CSR_WRITE(sc, WMREG_EECD, reg);
   13787 		CSR_WRITE_FLUSH(sc);
   13788 		delay(2);
   13789 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13790 		CSR_WRITE_FLUSH(sc);
   13791 		delay(2);
   13792 		CSR_WRITE(sc, WMREG_EECD, reg);
   13793 		CSR_WRITE_FLUSH(sc);
   13794 		delay(2);
   13795 	}
   13796 }
   13797 
   13798 /*
   13799  * wm_eeprom_recvbits:
   13800  *
   13801  *	Receive a series of bits from the EEPROM.
   13802  */
   13803 static void
   13804 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13805 {
   13806 	uint32_t reg, val;
   13807 	int x;
   13808 
   13809 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13810 
   13811 	val = 0;
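          	/*
          	 * Shift the bits in MSB first: raise SK, wait, sample DO
          	 * while the clock is high, then lower SK for each bit.
          	 */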
   13812 	for (x = nbits; x > 0; x--) {
   13813 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13814 		CSR_WRITE_FLUSH(sc);
   13815 		delay(2);
   13816 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13817 			val |= (1U << (x - 1));
   13818 		CSR_WRITE(sc, WMREG_EECD, reg);
   13819 		CSR_WRITE_FLUSH(sc);
   13820 		delay(2);
   13821 	}
   13822 	*valp = val;
   13823 }
   13824 
   13825 /* Microwire */
   13826 
   13827 /*
   13828  * wm_nvm_read_uwire:
   13829  *
   13830  *	Read a word from the EEPROM using the MicroWire protocol.
   13831  */
   13832 static int
   13833 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13834 {
   13835 	uint32_t reg, val;
   13836 	int i, rv;
   13837 
   13838 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13839 		device_xname(sc->sc_dev), __func__));
   13840 
   13841 	rv = sc->nvm.acquire(sc);
   13842 	if (rv != 0)
   13843 		return rv;
   13844 
   13845 	for (i = 0; i < wordcnt; i++) {
   13846 		/* Clear SK and DI. */
   13847 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13848 		CSR_WRITE(sc, WMREG_EECD, reg);
   13849 
   13850 		/*
    13851 		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
    13852 		 * and in Xen.
    13853 		 *
    13854 		 * We use this workaround only for 82540 because qemu's
    13855 		 * e1000 acts as an 82540.
   13856 		 */
   13857 		if (sc->sc_type == WM_T_82540) {
   13858 			reg |= EECD_SK;
   13859 			CSR_WRITE(sc, WMREG_EECD, reg);
   13860 			reg &= ~EECD_SK;
   13861 			CSR_WRITE(sc, WMREG_EECD, reg);
   13862 			CSR_WRITE_FLUSH(sc);
   13863 			delay(2);
   13864 		}
   13865 		/* XXX: end of workaround */
   13866 
   13867 		/* Set CHIP SELECT. */
   13868 		reg |= EECD_CS;
   13869 		CSR_WRITE(sc, WMREG_EECD, reg);
   13870 		CSR_WRITE_FLUSH(sc);
   13871 		delay(2);
   13872 
   13873 		/* Shift in the READ command. */
   13874 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13875 
   13876 		/* Shift in address. */
   13877 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13878 
   13879 		/* Shift out the data. */
   13880 		wm_eeprom_recvbits(sc, &val, 16);
   13881 		data[i] = val & 0xffff;
   13882 
   13883 		/* Clear CHIP SELECT. */
   13884 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13885 		CSR_WRITE(sc, WMREG_EECD, reg);
   13886 		CSR_WRITE_FLUSH(sc);
   13887 		delay(2);
   13888 	}
   13889 
   13890 	sc->nvm.release(sc);
   13891 	return 0;
   13892 }
   13893 
   13894 /* SPI */
   13895 
   13896 /*
   13897  * Set SPI and FLASH related information from the EECD register.
   13898  * For 82541 and 82547, the word size is taken from EEPROM.
   13899  */
   13900 static int
   13901 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13902 {
   13903 	int size;
   13904 	uint32_t reg;
   13905 	uint16_t data;
   13906 
   13907 	reg = CSR_READ(sc, WMREG_EECD);
   13908 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13909 
   13910 	/* Read the size of NVM from EECD by default */
   13911 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13912 	switch (sc->sc_type) {
   13913 	case WM_T_82541:
   13914 	case WM_T_82541_2:
   13915 	case WM_T_82547:
   13916 	case WM_T_82547_2:
   13917 		/* Set dummy value to access EEPROM */
   13918 		sc->sc_nvm_wordsize = 64;
   13919 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13920 			aprint_error_dev(sc->sc_dev,
   13921 			    "%s: failed to read EEPROM size\n", __func__);
   13922 		}
   13923 		reg = data;
   13924 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13925 		if (size == 0)
   13926 			size = 6; /* 64 word size */
   13927 		else
   13928 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13929 		break;
   13930 	case WM_T_80003:
   13931 	case WM_T_82571:
   13932 	case WM_T_82572:
   13933 	case WM_T_82573: /* SPI case */
   13934 	case WM_T_82574: /* SPI case */
   13935 	case WM_T_82583: /* SPI case */
   13936 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13937 		if (size > 14)
   13938 			size = 14;
   13939 		break;
   13940 	case WM_T_82575:
   13941 	case WM_T_82576:
   13942 	case WM_T_82580:
   13943 	case WM_T_I350:
   13944 	case WM_T_I354:
   13945 	case WM_T_I210:
   13946 	case WM_T_I211:
   13947 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13948 		if (size > 15)
   13949 			size = 15;
   13950 		break;
   13951 	default:
   13952 		aprint_error_dev(sc->sc_dev,
   13953 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13954 		return -1;
   13956 	}
   13957 
   13958 	sc->sc_nvm_wordsize = 1 << size;
   13959 
   13960 	return 0;
   13961 }
   13962 
   13963 /*
   13964  * wm_nvm_ready_spi:
   13965  *
   13966  *	Wait for a SPI EEPROM to be ready for commands.
   13967  */
   13968 static int
   13969 wm_nvm_ready_spi(struct wm_softc *sc)
   13970 {
   13971 	uint32_t val;
   13972 	int usec;
   13973 
   13974 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13975 		device_xname(sc->sc_dev), __func__));
   13976 
   13977 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13978 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13979 		wm_eeprom_recvbits(sc, &val, 8);
   13980 		if ((val & SPI_SR_RDY) == 0)
   13981 			break;
   13982 	}
   13983 	if (usec >= SPI_MAX_RETRIES) {
    13984 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   13985 		return -1;
   13986 	}
   13987 	return 0;
   13988 }
   13989 
   13990 /*
   13991  * wm_nvm_read_spi:
   13992  *
    13993  *	Read a word from the EEPROM using the SPI protocol.
   13994  */
   13995 static int
   13996 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13997 {
   13998 	uint32_t reg, val;
   13999 	int i;
   14000 	uint8_t opc;
   14001 	int rv;
   14002 
   14003 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14004 		device_xname(sc->sc_dev), __func__));
   14005 
   14006 	rv = sc->nvm.acquire(sc);
   14007 	if (rv != 0)
   14008 		return rv;
   14009 
   14010 	/* Clear SK and CS. */
   14011 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   14012 	CSR_WRITE(sc, WMREG_EECD, reg);
   14013 	CSR_WRITE_FLUSH(sc);
   14014 	delay(2);
   14015 
   14016 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   14017 		goto out;
   14018 
   14019 	/* Toggle CS to flush commands. */
   14020 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   14021 	CSR_WRITE_FLUSH(sc);
   14022 	delay(2);
   14023 	CSR_WRITE(sc, WMREG_EECD, reg);
   14024 	CSR_WRITE_FLUSH(sc);
   14025 	delay(2);
   14026 
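          	/*
          	 * Small parts use 8 address bits and carry the ninth address
          	 * bit (A8) in the opcode, so set it for words 128 and above.
          	 */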
   14027 	opc = SPI_OPC_READ;
   14028 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   14029 		opc |= SPI_OPC_A8;
   14030 
   14031 	wm_eeprom_sendbits(sc, opc, 8);
   14032 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   14033 
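          	/* Each word is shifted in MSB first; swap the bytes to host order. */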
   14034 	for (i = 0; i < wordcnt; i++) {
   14035 		wm_eeprom_recvbits(sc, &val, 16);
   14036 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   14037 	}
   14038 
   14039 	/* Raise CS and clear SK. */
   14040 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   14041 	CSR_WRITE(sc, WMREG_EECD, reg);
   14042 	CSR_WRITE_FLUSH(sc);
   14043 	delay(2);
   14044 
   14045 out:
   14046 	sc->nvm.release(sc);
   14047 	return rv;
   14048 }
   14049 
    14050 /* Using the EERD register */
   14051 
   14052 static int
   14053 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   14054 {
   14055 	uint32_t attempts = 100000;
   14056 	uint32_t i, reg = 0;
   14057 	int32_t done = -1;
   14058 
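          	/*
          	 * 'rw' selects the register to poll (EERD or EEWR); give the
          	 * hardware up to attempts * 5us to set the DONE bit.
          	 */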
   14059 	for (i = 0; i < attempts; i++) {
   14060 		reg = CSR_READ(sc, rw);
   14061 
   14062 		if (reg & EERD_DONE) {
   14063 			done = 0;
   14064 			break;
   14065 		}
   14066 		delay(5);
   14067 	}
   14068 
   14069 	return done;
   14070 }
   14071 
   14072 static int
   14073 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   14074 {
   14075 	int i, eerd = 0;
   14076 	int rv;
   14077 
   14078 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14079 		device_xname(sc->sc_dev), __func__));
   14080 
   14081 	rv = sc->nvm.acquire(sc);
   14082 	if (rv != 0)
   14083 		return rv;
   14084 
   14085 	for (i = 0; i < wordcnt; i++) {
   14086 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   14087 		CSR_WRITE(sc, WMREG_EERD, eerd);
   14088 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   14089 		if (rv != 0) {
   14090 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   14091 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   14092 			break;
   14093 		}
   14094 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   14095 	}
   14096 
   14097 	sc->nvm.release(sc);
   14098 	return rv;
   14099 }
   14100 
   14101 /* Flash */
   14102 
   14103 static int
   14104 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   14105 {
   14106 	uint32_t eecd;
   14107 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   14108 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   14109 	uint32_t nvm_dword = 0;
   14110 	uint8_t sig_byte = 0;
   14111 	int rv;
   14112 
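          	/*
          	 * The valid bank is identified by a signature in the high
          	 * byte of word ICH_NVM_SIG_WORD; check bank 0 first, then
          	 * bank 1.
          	 */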
   14113 	switch (sc->sc_type) {
   14114 	case WM_T_PCH_SPT:
   14115 	case WM_T_PCH_CNP:
   14116 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   14117 		act_offset = ICH_NVM_SIG_WORD * 2;
   14118 
   14119 		/* Set bank to 0 in case flash read fails. */
   14120 		*bank = 0;
   14121 
   14122 		/* Check bank 0 */
   14123 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   14124 		if (rv != 0)
   14125 			return rv;
   14126 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14127 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14128 			*bank = 0;
   14129 			return 0;
   14130 		}
   14131 
   14132 		/* Check bank 1 */
    14133 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    14134 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    14135 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   14136 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14137 			*bank = 1;
   14138 			return 0;
   14139 		}
   14140 		aprint_error_dev(sc->sc_dev,
   14141 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   14142 		return -1;
   14143 	case WM_T_ICH8:
   14144 	case WM_T_ICH9:
   14145 		eecd = CSR_READ(sc, WMREG_EECD);
   14146 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   14147 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   14148 			return 0;
   14149 		}
   14150 		/* FALLTHROUGH */
   14151 	default:
   14152 		/* Default to 0 */
   14153 		*bank = 0;
   14154 
   14155 		/* Check bank 0 */
   14156 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   14157 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14158 			*bank = 0;
   14159 			return 0;
   14160 		}
   14161 
   14162 		/* Check bank 1 */
   14163 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   14164 		    &sig_byte);
   14165 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   14166 			*bank = 1;
   14167 			return 0;
   14168 		}
   14169 	}
   14170 
   14171 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   14172 		device_xname(sc->sc_dev)));
   14173 	return -1;
   14174 }
   14175 
   14176 /******************************************************************************
   14177  * This function does initial flash setup so that a new read/write/erase cycle
   14178  * can be started.
   14179  *
   14180  * sc - The pointer to the hw structure
   14181  ****************************************************************************/
   14182 static int32_t
   14183 wm_ich8_cycle_init(struct wm_softc *sc)
   14184 {
   14185 	uint16_t hsfsts;
   14186 	int32_t error = 1;
   14187 	int32_t i     = 0;
   14188 
   14189 	if (sc->sc_type >= WM_T_PCH_SPT)
   14190 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   14191 	else
   14192 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14193 
	/* Check the Flash Descriptor Valid bit in HW status */
   14195 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   14196 		return error;
   14197 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   14200 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   14201 
   14202 	if (sc->sc_type >= WM_T_PCH_SPT)
   14203 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   14204 	else
   14205 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14206 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the cycle-in-
	 * progress bit so that accesses by two threads are serialized,
	 * or some other way to keep two threads from starting a cycle at
	 * the same time.
	 */
   14217 
   14218 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14219 		/*
   14220 		 * There is no cycle running at present, so we can start a
   14221 		 * cycle
   14222 		 */
   14223 
   14224 		/* Begin by setting Flash Cycle Done. */
   14225 		hsfsts |= HSFSTS_DONE;
   14226 		if (sc->sc_type >= WM_T_PCH_SPT)
   14227 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14228 			    hsfsts & 0xffffUL);
   14229 		else
   14230 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14231 		error = 0;
   14232 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   14237 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14238 			if (sc->sc_type >= WM_T_PCH_SPT)
   14239 				hsfsts = ICH8_FLASH_READ32(sc,
   14240 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14241 			else
   14242 				hsfsts = ICH8_FLASH_READ16(sc,
   14243 				    ICH_FLASH_HSFSTS);
   14244 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14245 				error = 0;
   14246 				break;
   14247 			}
   14248 			delay(1);
   14249 		}
   14250 		if (error == 0) {
   14251 			/*
			 * The previous cycle completed within the timeout;
			 * now set the Flash Cycle Done bit.
   14254 			 */
   14255 			hsfsts |= HSFSTS_DONE;
   14256 			if (sc->sc_type >= WM_T_PCH_SPT)
   14257 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14258 				    hsfsts & 0xffffUL);
   14259 			else
   14260 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14261 				    hsfsts);
   14262 		}
   14263 	}
   14264 	return error;
   14265 }
   14266 
   14267 /******************************************************************************
   14268  * This function starts a flash cycle and waits for its completion
   14269  *
   14270  * sc - The pointer to the hw structure
   14271  ****************************************************************************/
   14272 static int32_t
   14273 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14274 {
   14275 	uint16_t hsflctl;
   14276 	uint16_t hsfsts;
   14277 	int32_t error = 1;
   14278 	uint32_t i = 0;
   14279 
   14280 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14281 	if (sc->sc_type >= WM_T_PCH_SPT)
   14282 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14283 	else
   14284 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14285 	hsflctl |= HSFCTL_GO;
   14286 	if (sc->sc_type >= WM_T_PCH_SPT)
   14287 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14288 		    (uint32_t)hsflctl << 16);
   14289 	else
   14290 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14291 
   14292 	/* Wait till FDONE bit is set to 1 */
   14293 	do {
   14294 		if (sc->sc_type >= WM_T_PCH_SPT)
   14295 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14296 			    & 0xffffUL;
   14297 		else
   14298 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14299 		if (hsfsts & HSFSTS_DONE)
   14300 			break;
   14301 		delay(1);
   14302 		i++;
   14303 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   14305 		error = 0;
   14306 
   14307 	return error;
   14308 }
   14309 
   14310 /******************************************************************************
   14311  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14312  *
   14313  * sc - The pointer to the hw structure
   14314  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   14316  * data - Pointer to the word to store the value read.
   14317  *****************************************************************************/
   14318 static int32_t
   14319 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14320     uint32_t size, uint32_t *data)
   14321 {
   14322 	uint16_t hsfsts;
   14323 	uint16_t hsflctl;
   14324 	uint32_t flash_linear_address;
   14325 	uint32_t flash_data = 0;
   14326 	int32_t error = 1;
   14327 	int32_t count = 0;
   14328 
	if (size < 1 || size > 4 || data == NULL ||
   14330 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14331 		return error;
   14332 
   14333 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14334 	    sc->sc_ich8_flash_base;
   14335 
   14336 	do {
   14337 		delay(1);
   14338 		/* Steps */
   14339 		error = wm_ich8_cycle_init(sc);
   14340 		if (error)
   14341 			break;
   14342 
   14343 		if (sc->sc_type >= WM_T_PCH_SPT)
   14344 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14345 			    >> 16;
   14346 		else
   14347 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* (size - 1) of 0, 1 or 3 selects a 1-, 2- or 4-byte read. */
   14349 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14350 		    & HSFCTL_BCOUNT_MASK;
   14351 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14352 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14353 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is supported.
   14356 			 */
   14357 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14358 			    (uint32_t)hsflctl << 16);
   14359 		} else
   14360 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14361 
		/*
		 * Write the last 24 bits of the index into the Flash
		 * Linear Address field of the Flash Address register.
		 */
		/* TODO: check the index against the size of the flash */
   14367 
   14368 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14369 
   14370 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14371 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the value out of Flash
		 * Data0, least significant byte first.
		 */
   14378 		if (error == 0) {
   14379 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14380 			if (size == 1)
   14381 				*data = (uint8_t)(flash_data & 0x000000FF);
   14382 			else if (size == 2)
   14383 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14384 			else if (size == 4)
   14385 				*data = (uint32_t)flash_data;
   14386 			break;
   14387 		} else {
   14388 			/*
			 * If we've gotten here, things are probably
			 * completely hosed, but if the error condition is
			 * detected it won't hurt to retry the whole sequence
			 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14393 			 */
   14394 			if (sc->sc_type >= WM_T_PCH_SPT)
   14395 				hsfsts = ICH8_FLASH_READ32(sc,
   14396 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14397 			else
   14398 				hsfsts = ICH8_FLASH_READ16(sc,
   14399 				    ICH_FLASH_HSFSTS);
   14400 
   14401 			if (hsfsts & HSFSTS_ERR) {
   14402 				/* Repeat for some time before giving up. */
   14403 				continue;
   14404 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14405 				break;
   14406 		}
   14407 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14408 
   14409 	return error;
   14410 }
   14411 
   14412 /******************************************************************************
   14413  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14414  *
   14415  * sc - pointer to wm_hw structure
   14416  * index - The index of the byte to read.
   14417  * data - Pointer to a byte to store the value read.
   14418  *****************************************************************************/
   14419 static int32_t
   14420 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14421 {
   14422 	int32_t status;
   14423 	uint32_t word = 0;
   14424 
   14425 	status = wm_read_ich8_data(sc, index, 1, &word);
   14426 	if (status == 0)
   14427 		*data = (uint8_t)word;
   14428 	else
   14429 		*data = 0;
   14430 
   14431 	return status;
   14432 }
   14433 
   14434 /******************************************************************************
   14435  * Reads a word from the NVM using the ICH8 flash access registers.
   14436  *
   14437  * sc - pointer to wm_hw structure
   14438  * index - The starting byte index of the word to read.
   14439  * data - Pointer to a word to store the value read.
   14440  *****************************************************************************/
   14441 static int32_t
   14442 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14443 {
   14444 	int32_t status;
   14445 	uint32_t word = 0;
   14446 
   14447 	status = wm_read_ich8_data(sc, index, 2, &word);
   14448 	if (status == 0)
   14449 		*data = (uint16_t)word;
   14450 	else
   14451 		*data = 0;
   14452 
   14453 	return status;
   14454 }
   14455 
   14456 /******************************************************************************
   14457  * Reads a dword from the NVM using the ICH8 flash access registers.
   14458  *
   14459  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   14462  *****************************************************************************/
   14463 static int32_t
   14464 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14465 {
   14466 	int32_t status;
   14467 
   14468 	status = wm_read_ich8_data(sc, index, 4, data);
   14469 	return status;
   14470 }
   14471 
   14472 /******************************************************************************
   14473  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14474  * register.
   14475  *
   14476  * sc - Struct containing variables accessed by shared code
   14477  * offset - offset of word in the EEPROM to read
   14478  * data - word read from the EEPROM
   14479  * words - number of words to read
   14480  *****************************************************************************/
   14481 static int
   14482 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14483 {
   14484 	int rv;
   14485 	uint32_t flash_bank = 0;
   14486 	uint32_t act_offset = 0;
   14487 	uint32_t bank_offset = 0;
   14488 	uint16_t word = 0;
   14489 	uint16_t i = 0;
   14490 
   14491 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14492 		device_xname(sc->sc_dev), __func__));
   14493 
   14494 	rv = sc->nvm.acquire(sc);
   14495 	if (rv != 0)
   14496 		return rv;
   14497 
   14498 	/*
   14499 	 * We need to know which is the valid flash bank.  In the event
   14500 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14501 	 * managing flash_bank. So it cannot be trusted and needs
   14502 	 * to be updated with each read.
   14503 	 */
   14504 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14505 	if (rv) {
   14506 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14507 			device_xname(sc->sc_dev)));
   14508 		flash_bank = 0;
   14509 	}
   14510 
   14511 	/*
   14512 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14513 	 * size
   14514 	 */
   14515 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14516 
   14517 	for (i = 0; i < words; i++) {
   14518 		/* The NVM part needs a byte offset, hence * 2 */
   14519 		act_offset = bank_offset + ((offset + i) * 2);
   14520 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14521 		if (rv) {
   14522 			aprint_error_dev(sc->sc_dev,
   14523 			    "%s: failed to read NVM\n", __func__);
   14524 			break;
   14525 		}
   14526 		data[i] = word;
   14527 	}
   14528 
   14529 	sc->nvm.release(sc);
   14530 	return rv;
   14531 }
   14532 
   14533 /******************************************************************************
   14534  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14535  * register.
   14536  *
   14537  * sc - Struct containing variables accessed by shared code
   14538  * offset - offset of word in the EEPROM to read
   14539  * data - word read from the EEPROM
   14540  * words - number of words to read
   14541  *****************************************************************************/
   14542 static int
   14543 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14544 {
   14545 	int	 rv;
   14546 	uint32_t flash_bank = 0;
   14547 	uint32_t act_offset = 0;
   14548 	uint32_t bank_offset = 0;
   14549 	uint32_t dword = 0;
   14550 	uint16_t i = 0;
   14551 
   14552 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14553 		device_xname(sc->sc_dev), __func__));
   14554 
   14555 	rv = sc->nvm.acquire(sc);
   14556 	if (rv != 0)
   14557 		return rv;
   14558 
   14559 	/*
   14560 	 * We need to know which is the valid flash bank.  In the event
   14561 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   14562 	 * managing flash_bank. So it cannot be trusted and needs
   14563 	 * to be updated with each read.
   14564 	 */
   14565 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14566 	if (rv) {
   14567 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14568 			device_xname(sc->sc_dev)));
   14569 		flash_bank = 0;
   14570 	}
   14571 
   14572 	/*
   14573 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   14574 	 * size
   14575 	 */
   14576 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14577 
   14578 	for (i = 0; i < words; i++) {
   14579 		/* The NVM part needs a byte offset, hence * 2 */
   14580 		act_offset = bank_offset + ((offset + i) * 2);
   14581 		/* but we must read dword aligned, so mask ... */
   14582 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14583 		if (rv) {
   14584 			aprint_error_dev(sc->sc_dev,
   14585 			    "%s: failed to read NVM\n", __func__);
   14586 			break;
   14587 		}
   14588 		/* ... and pick out low or high word */
   14589 		if ((act_offset & 0x2) == 0)
   14590 			data[i] = (uint16_t)(dword & 0xFFFF);
   14591 		else
   14592 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14593 	}
   14594 
   14595 	sc->nvm.release(sc);
   14596 	return rv;
   14597 }
   14598 
   14599 /* iNVM */
   14600 
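/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (the integrated, flash-less NVM on I210/I211)
 *	records for a word-autoload entry matching the given word
 *	address.  Return 0 and fill *data on a match; return -1 if the
 *	word is not found.
 */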
   14601 static int
   14602 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14603 {
	int32_t	 rv = -1;	/* -1 until the requested word is found */
   14605 	uint32_t invm_dword;
   14606 	uint16_t i;
   14607 	uint8_t record_type, word_address;
   14608 
   14609 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14610 		device_xname(sc->sc_dev), __func__));
   14611 
   14612 	for (i = 0; i < INVM_SIZE; i++) {
   14613 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14614 		/* Get record type */
   14615 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14616 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14617 			break;
   14618 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14619 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14620 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14621 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14622 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14623 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14624 			if (word_address == address) {
   14625 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14626 				rv = 0;
   14627 				break;
   14628 			}
   14629 		}
   14630 	}
   14631 
   14632 	return rv;
   14633 }
   14634 
   14635 static int
   14636 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14637 {
   14638 	int i, rv;
   14639 
   14640 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14641 		device_xname(sc->sc_dev), __func__));
   14642 
   14643 	rv = sc->nvm.acquire(sc);
   14644 	if (rv != 0)
   14645 		return rv;
   14646 
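	/*
	 * Words that are not stored in the iNVM fall back to documented
	 * defaults (or 0xffff for the MAC address words) so that callers
	 * still see sane values on flash-less parts.
	 */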
   14647 	for (i = 0; i < words; i++) {
   14648 		switch (offset + i) {
   14649 		case NVM_OFF_MACADDR:
   14650 		case NVM_OFF_MACADDR1:
   14651 		case NVM_OFF_MACADDR2:
   14652 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14653 			if (rv != 0) {
   14654 				data[i] = 0xffff;
   14655 				rv = -1;
   14656 			}
   14657 			break;
   14658 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14659 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14660 			if (rv != 0) {
   14661 				*data = INVM_DEFAULT_AL;
   14662 				rv = 0;
   14663 			}
   14664 			break;
   14665 		case NVM_OFF_CFG2:
   14666 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14667 			if (rv != 0) {
   14668 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14669 				rv = 0;
   14670 			}
   14671 			break;
   14672 		case NVM_OFF_CFG4:
   14673 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14674 			if (rv != 0) {
   14675 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14676 				rv = 0;
   14677 			}
   14678 			break;
   14679 		case NVM_OFF_LED_1_CFG:
   14680 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14681 			if (rv != 0) {
   14682 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14683 				rv = 0;
   14684 			}
   14685 			break;
   14686 		case NVM_OFF_LED_0_2_CFG:
   14687 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14688 			if (rv != 0) {
   14689 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14690 				rv = 0;
   14691 			}
   14692 			break;
   14693 		case NVM_OFF_ID_LED_SETTINGS:
   14694 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14695 			if (rv != 0) {
   14696 				*data = ID_LED_RESERVED_FFFF;
   14697 				rv = 0;
   14698 			}
   14699 			break;
   14700 		default:
   14701 			DPRINTF(sc, WM_DEBUG_NVM,
   14702 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14703 			*data = NVM_RESERVED_WORD;
   14704 			break;
   14705 		}
   14706 	}
   14707 
   14708 	sc->nvm.release(sc);
   14709 	return rv;
   14710 }
   14711 
   14712 /* Lock, detecting NVM type, validate checksum, version and read */
   14713 
   14714 static int
   14715 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14716 {
   14717 	uint32_t eecd = 0;
   14718 
   14719 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14720 	    || sc->sc_type == WM_T_82583) {
   14721 		eecd = CSR_READ(sc, WMREG_EECD);
   14722 
   14723 		/* Isolate bits 15 & 16 */
   14724 		eecd = ((eecd >> 15) & 0x03);
   14725 
   14726 		/* If both bits are set, device is Flash type */
   14727 		if (eecd == 0x03)
   14728 			return 0;
   14729 	}
   14730 	return 1;
   14731 }
   14732 
   14733 static int
   14734 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14735 {
   14736 	uint32_t eec;
   14737 
   14738 	eec = CSR_READ(sc, WMREG_EEC);
   14739 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14740 		return 1;
   14741 
   14742 	return 0;
   14743 }
   14744 
   14745 /*
   14746  * wm_nvm_validate_checksum
   14747  *
   14748  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14749  */
   14750 static int
   14751 wm_nvm_validate_checksum(struct wm_softc *sc)
   14752 {
   14753 	uint16_t checksum;
   14754 	uint16_t eeprom_data;
   14755 #ifdef WM_DEBUG
   14756 	uint16_t csum_wordaddr, valid_checksum;
   14757 #endif
   14758 	int i;
   14759 
   14760 	checksum = 0;
   14761 
   14762 	/* Don't check for I211 */
   14763 	if (sc->sc_type == WM_T_I211)
   14764 		return 0;
   14765 
   14766 #ifdef WM_DEBUG
   14767 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14768 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14769 		csum_wordaddr = NVM_OFF_COMPAT;
   14770 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14771 	} else {
   14772 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14773 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14774 	}
   14775 
   14776 	/* Dump EEPROM image for debug */
   14777 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14778 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14779 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14780 		/* XXX PCH_SPT? */
   14781 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14782 		if ((eeprom_data & valid_checksum) == 0)
   14783 			DPRINTF(sc, WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   14785 				device_xname(sc->sc_dev), eeprom_data,
   14786 				valid_checksum));
   14787 	}
   14788 
   14789 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14790 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14791 		for (i = 0; i < NVM_SIZE; i++) {
   14792 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14793 				printf("XXXX ");
   14794 			else
   14795 				printf("%04hx ", eeprom_data);
   14796 			if (i % 8 == 7)
   14797 				printf("\n");
   14798 		}
   14799 	}
   14800 
   14801 #endif /* WM_DEBUG */
   14802 
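	/*
	 * Sum all NVM_SIZE words; a valid image sums to NVM_CHECKSUM.
	 * Note that a mismatch below is only reported, and only under
	 * WM_DEBUG; it is not treated as fatal.
	 */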
   14803 	for (i = 0; i < NVM_SIZE; i++) {
   14804 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14805 			return -1;
   14806 		checksum += eeprom_data;
   14807 	}
   14808 
   14809 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14810 #ifdef WM_DEBUG
   14811 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14812 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14813 #endif
   14814 	}
   14815 
   14816 	return 0;
   14817 }
   14818 
   14819 static void
   14820 wm_nvm_version_invm(struct wm_softc *sc)
   14821 {
   14822 	uint32_t dword;
   14823 
   14824 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   14828 	 *
   14829 	 * Example:
   14830 	 *
   14831 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14832 	 */
   14833 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14834 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14835 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14836 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14837 }
   14838 
   14839 static void
   14840 wm_nvm_version(struct wm_softc *sc)
   14841 {
   14842 	uint16_t major, minor, build, patch;
   14843 	uint16_t uid0, uid1;
   14844 	uint16_t nvm_data;
   14845 	uint16_t off;
   14846 	bool check_version = false;
   14847 	bool check_optionrom = false;
   14848 	bool have_build = false;
   14849 	bool have_uid = true;
   14850 
   14851 	/*
   14852 	 * Version format:
   14853 	 *
   14854 	 * XYYZ
   14855 	 * X0YZ
   14856 	 * X0YY
   14857 	 *
   14858 	 * Example:
   14859 	 *
   14860 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14861 	 *	82571	0x50a6	5.10.6?
   14862 	 *	82572	0x506a	5.6.10?
   14863 	 *	82572EI	0x5069	5.6.9?
   14864 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14865 	 *		0x2013	2.1.3?
   14866 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14867 	 * ICH8+82567	0x0040	0.4.0?
   14868 	 * ICH9+82566	0x1040	1.4.0?
   14869 	 *ICH10+82567	0x0043	0.4.3?
   14870 	 *  PCH+82577	0x00c1	0.12.1?
   14871 	 * PCH2+82579	0x00d3	0.13.3?
   14872 	 *		0x00d4	0.13.4?
   14873 	 *  LPT+I218	0x0023	0.2.3?
   14874 	 *  SPT+I219	0x0084	0.8.4?
   14875 	 *  CNP+I219	0x0054	0.5.4?
   14876 	 */
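	/*
	 * E.g. 0x50a2 above decodes as major 5 (the high nibble), minor
	 * 0x0a -> 10 after the decimal conversion below, and build 2,
	 * giving "5.10.2".
	 */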
   14877 
   14878 	/*
   14879 	 * XXX
   14880 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14881 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14882 	 */
   14883 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14884 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14885 		have_uid = false;
   14886 
   14887 	switch (sc->sc_type) {
   14888 	case WM_T_82571:
   14889 	case WM_T_82572:
   14890 	case WM_T_82574:
   14891 	case WM_T_82583:
   14892 		check_version = true;
   14893 		check_optionrom = true;
   14894 		have_build = true;
   14895 		break;
   14896 	case WM_T_ICH8:
   14897 	case WM_T_ICH9:
   14898 	case WM_T_ICH10:
   14899 	case WM_T_PCH:
   14900 	case WM_T_PCH2:
   14901 	case WM_T_PCH_LPT:
   14902 	case WM_T_PCH_SPT:
   14903 	case WM_T_PCH_CNP:
   14904 		check_version = true;
   14905 		have_build = true;
   14906 		have_uid = false;
   14907 		break;
   14908 	case WM_T_82575:
   14909 	case WM_T_82576:
   14910 	case WM_T_82580:
   14911 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14912 			check_version = true;
   14913 		break;
   14914 	case WM_T_I211:
   14915 		wm_nvm_version_invm(sc);
   14916 		have_uid = false;
   14917 		goto printver;
   14918 	case WM_T_I210:
   14919 		if (!wm_nvm_flash_presence_i210(sc)) {
   14920 			wm_nvm_version_invm(sc);
   14921 			have_uid = false;
   14922 			goto printver;
   14923 		}
   14924 		/* FALLTHROUGH */
   14925 	case WM_T_I350:
   14926 	case WM_T_I354:
   14927 		check_version = true;
   14928 		check_optionrom = true;
   14929 		break;
   14930 	default:
   14931 		return;
   14932 	}
   14933 	if (check_version
   14934 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14935 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14936 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14937 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14938 			build = nvm_data & NVM_BUILD_MASK;
   14939 			have_build = true;
   14940 		} else
   14941 			minor = nvm_data & 0x00ff;
   14942 
		/* Treat the hex digits of minor as decimal (e.g. 0x12 -> 12) */
   14944 		minor = (minor / 16) * 10 + (minor % 16);
   14945 		sc->sc_nvm_ver_major = major;
   14946 		sc->sc_nvm_ver_minor = minor;
   14947 
   14948 printver:
   14949 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14950 		    sc->sc_nvm_ver_minor);
   14951 		if (have_build) {
   14952 			sc->sc_nvm_ver_build = build;
   14953 			aprint_verbose(".%d", build);
   14954 		}
   14955 	}
   14956 
	/* Assume the Option ROM area is above NVM_SIZE */
   14958 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14959 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14960 		/* Option ROM Version */
   14961 		if ((off != 0x0000) && (off != 0xffff)) {
   14962 			int rv;
   14963 
   14964 			off += NVM_COMBO_VER_OFF;
   14965 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14966 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14967 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14968 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14969 				/* 16bits */
   14970 				major = uid0 >> 8;
   14971 				build = (uid0 << 8) | (uid1 >> 8);
   14972 				patch = uid1 & 0x00ff;
   14973 				aprint_verbose(", option ROM Version %d.%d.%d",
   14974 				    major, build, patch);
   14975 			}
   14976 		}
   14977 	}
   14978 
   14979 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14980 		aprint_verbose(", Image Unique ID %08x",
   14981 		    ((uint32_t)uid1 << 16) | uid0);
   14982 }
   14983 
   14984 /*
   14985  * wm_nvm_read:
   14986  *
   14987  *	Read data from the serial EEPROM.
   14988  */
   14989 static int
   14990 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14991 {
   14992 	int rv;
   14993 
   14994 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14995 		device_xname(sc->sc_dev), __func__));
   14996 
   14997 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14998 		return -1;
   14999 
   15000 	rv = sc->nvm.read(sc, word, wordcnt, data);
   15001 
   15002 	return rv;
   15003 }
   15004 
   15005 /*
   15006  * Hardware semaphores.
 * Very complex...
   15008  */
   15009 
   15010 static int
   15011 wm_get_null(struct wm_softc *sc)
   15012 {
   15013 
   15014 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15015 		device_xname(sc->sc_dev), __func__));
   15016 	return 0;
   15017 }
   15018 
   15019 static void
   15020 wm_put_null(struct wm_softc *sc)
   15021 {
   15022 
   15023 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15024 		device_xname(sc->sc_dev), __func__));
   15025 	return;
   15026 }
   15027 
   15028 static int
   15029 wm_get_eecd(struct wm_softc *sc)
   15030 {
   15031 	uint32_t reg;
   15032 	int x;
   15033 
   15034 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15035 		device_xname(sc->sc_dev), __func__));
   15036 
   15037 	reg = CSR_READ(sc, WMREG_EECD);
   15038 
   15039 	/* Request EEPROM access. */
   15040 	reg |= EECD_EE_REQ;
   15041 	CSR_WRITE(sc, WMREG_EECD, reg);
   15042 
	/* ... and wait for it to be granted. */
   15044 	for (x = 0; x < 1000; x++) {
   15045 		reg = CSR_READ(sc, WMREG_EECD);
   15046 		if (reg & EECD_EE_GNT)
   15047 			break;
   15048 		delay(5);
   15049 	}
   15050 	if ((reg & EECD_EE_GNT) == 0) {
   15051 		aprint_error_dev(sc->sc_dev,
   15052 		    "could not acquire EEPROM GNT\n");
   15053 		reg &= ~EECD_EE_REQ;
   15054 		CSR_WRITE(sc, WMREG_EECD, reg);
   15055 		return -1;
   15056 	}
   15057 
   15058 	return 0;
   15059 }
   15060 
   15061 static void
   15062 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   15063 {
   15064 
   15065 	*eecd |= EECD_SK;
   15066 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15067 	CSR_WRITE_FLUSH(sc);
   15068 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15069 		delay(1);
   15070 	else
   15071 		delay(50);
   15072 }
   15073 
   15074 static void
   15075 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   15076 {
   15077 
   15078 	*eecd &= ~EECD_SK;
   15079 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   15080 	CSR_WRITE_FLUSH(sc);
   15081 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   15082 		delay(1);
   15083 	else
   15084 		delay(50);
   15085 }
   15086 
   15087 static void
   15088 wm_put_eecd(struct wm_softc *sc)
   15089 {
   15090 	uint32_t reg;
   15091 
   15092 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15093 		device_xname(sc->sc_dev), __func__));
   15094 
   15095 	/* Stop nvm */
   15096 	reg = CSR_READ(sc, WMREG_EECD);
   15097 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   15098 		/* Pull CS high */
   15099 		reg |= EECD_CS;
   15100 		wm_nvm_eec_clock_lower(sc, &reg);
   15101 	} else {
   15102 		/* CS on Microwire is active-high */
   15103 		reg &= ~(EECD_CS | EECD_DI);
   15104 		CSR_WRITE(sc, WMREG_EECD, reg);
   15105 		wm_nvm_eec_clock_raise(sc, &reg);
   15106 		wm_nvm_eec_clock_lower(sc, &reg);
   15107 	}
   15108 
   15109 	reg = CSR_READ(sc, WMREG_EECD);
   15110 	reg &= ~EECD_EE_REQ;
   15111 	CSR_WRITE(sc, WMREG_EECD, reg);
   15112 
   15113 	return;
   15114 }
   15115 
   15116 /*
   15117  * Get hardware semaphore.
   15118  * Same as e1000_get_hw_semaphore_generic()
   15119  */
   15120 static int
   15121 wm_get_swsm_semaphore(struct wm_softc *sc)
   15122 {
   15123 	int32_t timeout;
   15124 	uint32_t swsm;
   15125 
   15126 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15127 		device_xname(sc->sc_dev), __func__));
   15128 	KASSERT(sc->sc_nvm_wordsize > 0);
   15129 
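	/*
	 * Two stage handshake: first take the SW semaphore (SMBI reads
	 * back clear), then claim the SW/FW semaphore by setting SWESMBI
	 * and checking that the write stuck.
	 */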
   15130 retry:
   15131 	/* Get the SW semaphore. */
   15132 	timeout = sc->sc_nvm_wordsize + 1;
   15133 	while (timeout) {
   15134 		swsm = CSR_READ(sc, WMREG_SWSM);
   15135 
   15136 		if ((swsm & SWSM_SMBI) == 0)
   15137 			break;
   15138 
   15139 		delay(50);
   15140 		timeout--;
   15141 	}
   15142 
   15143 	if (timeout == 0) {
   15144 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   15145 			/*
   15146 			 * In rare circumstances, the SW semaphore may already
   15147 			 * be held unintentionally. Clear the semaphore once
   15148 			 * before giving up.
   15149 			 */
   15150 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   15151 			wm_put_swsm_semaphore(sc);
   15152 			goto retry;
   15153 		}
   15154 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   15155 		return -1;
   15156 	}
   15157 
   15158 	/* Get the FW semaphore. */
   15159 	timeout = sc->sc_nvm_wordsize + 1;
   15160 	while (timeout) {
   15161 		swsm = CSR_READ(sc, WMREG_SWSM);
   15162 		swsm |= SWSM_SWESMBI;
   15163 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   15164 		/* If we managed to set the bit we got the semaphore. */
   15165 		swsm = CSR_READ(sc, WMREG_SWSM);
   15166 		if (swsm & SWSM_SWESMBI)
   15167 			break;
   15168 
   15169 		delay(50);
   15170 		timeout--;
   15171 	}
   15172 
   15173 	if (timeout == 0) {
   15174 		aprint_error_dev(sc->sc_dev,
   15175 		    "could not acquire SWSM SWESMBI\n");
   15176 		/* Release semaphores */
   15177 		wm_put_swsm_semaphore(sc);
   15178 		return -1;
   15179 	}
   15180 	return 0;
   15181 }
   15182 
   15183 /*
   15184  * Put hardware semaphore.
   15185  * Same as e1000_put_hw_semaphore_generic()
   15186  */
   15187 static void
   15188 wm_put_swsm_semaphore(struct wm_softc *sc)
   15189 {
   15190 	uint32_t swsm;
   15191 
   15192 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15193 		device_xname(sc->sc_dev), __func__));
   15194 
   15195 	swsm = CSR_READ(sc, WMREG_SWSM);
   15196 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   15197 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   15198 }
   15199 
   15200 /*
   15201  * Get SW/FW semaphore.
   15202  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
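 * Each resource has a SW and a FW ownership bit in the SW_FW_SYNC
 * register; the SW bit may be taken only while both are clear, under
 * protection of the SWSM semaphore.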
   15203  */
   15204 static int
   15205 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15206 {
   15207 	uint32_t swfw_sync;
   15208 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   15209 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   15210 	int timeout;
   15211 
   15212 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15213 		device_xname(sc->sc_dev), __func__));
   15214 
   15215 	if (sc->sc_type == WM_T_80003)
   15216 		timeout = 50;
   15217 	else
   15218 		timeout = 200;
   15219 
   15220 	while (timeout) {
   15221 		if (wm_get_swsm_semaphore(sc)) {
   15222 			aprint_error_dev(sc->sc_dev,
   15223 			    "%s: failed to get semaphore\n",
   15224 			    __func__);
   15225 			return -1;
   15226 		}
   15227 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15228 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15229 			swfw_sync |= swmask;
   15230 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15231 			wm_put_swsm_semaphore(sc);
   15232 			return 0;
   15233 		}
   15234 		wm_put_swsm_semaphore(sc);
   15235 		delay(5000);
   15236 		timeout--;
   15237 	}
   15238 	device_printf(sc->sc_dev,
   15239 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15240 	    mask, swfw_sync);
   15241 	return -1;
   15242 }
   15243 
   15244 static void
   15245 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15246 {
   15247 	uint32_t swfw_sync;
   15248 
   15249 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15250 		device_xname(sc->sc_dev), __func__));
   15251 
   15252 	while (wm_get_swsm_semaphore(sc) != 0)
   15253 		continue;
   15254 
   15255 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15256 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15257 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15258 
   15259 	wm_put_swsm_semaphore(sc);
   15260 }
   15261 
   15262 static int
   15263 wm_get_nvm_80003(struct wm_softc *sc)
   15264 {
   15265 	int rv;
   15266 
   15267 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15268 		device_xname(sc->sc_dev), __func__));
   15269 
   15270 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15271 		aprint_error_dev(sc->sc_dev,
   15272 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15273 		return rv;
   15274 	}
   15275 
   15276 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15277 	    && (rv = wm_get_eecd(sc)) != 0) {
   15278 		aprint_error_dev(sc->sc_dev,
   15279 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15280 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15281 		return rv;
   15282 	}
   15283 
   15284 	return 0;
   15285 }
   15286 
   15287 static void
   15288 wm_put_nvm_80003(struct wm_softc *sc)
   15289 {
   15290 
   15291 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15292 		device_xname(sc->sc_dev), __func__));
   15293 
   15294 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15295 		wm_put_eecd(sc);
   15296 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15297 }
   15298 
   15299 static int
   15300 wm_get_nvm_82571(struct wm_softc *sc)
   15301 {
   15302 	int rv;
   15303 
   15304 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15305 		device_xname(sc->sc_dev), __func__));
   15306 
   15307 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15308 		return rv;
   15309 
   15310 	switch (sc->sc_type) {
   15311 	case WM_T_82573:
   15312 		break;
   15313 	default:
   15314 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15315 			rv = wm_get_eecd(sc);
   15316 		break;
   15317 	}
   15318 
   15319 	if (rv != 0) {
   15320 		aprint_error_dev(sc->sc_dev,
   15321 		    "%s: failed to get semaphore\n",
   15322 		    __func__);
   15323 		wm_put_swsm_semaphore(sc);
   15324 	}
   15325 
   15326 	return rv;
   15327 }
   15328 
   15329 static void
   15330 wm_put_nvm_82571(struct wm_softc *sc)
   15331 {
   15332 
   15333 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15334 		device_xname(sc->sc_dev), __func__));
   15335 
   15336 	switch (sc->sc_type) {
   15337 	case WM_T_82573:
   15338 		break;
   15339 	default:
   15340 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15341 			wm_put_eecd(sc);
   15342 		break;
   15343 	}
   15344 
   15345 	wm_put_swsm_semaphore(sc);
   15346 }
   15347 
   15348 static int
   15349 wm_get_phy_82575(struct wm_softc *sc)
   15350 {
   15351 
   15352 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15353 		device_xname(sc->sc_dev), __func__));
   15354 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15355 }
   15356 
   15357 static void
   15358 wm_put_phy_82575(struct wm_softc *sc)
   15359 {
   15360 
   15361 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15362 		device_xname(sc->sc_dev), __func__));
   15363 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15364 }
   15365 
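/*
 * Get the SW/FW/HW semaphore by setting the MDIO software ownership
 * bit in EXTCNFCTR and confirming that it stuck (ICH8 and newer).
 * The ICH PHY mutex is taken first, since it serializes both PHY and
 * NVM access within this driver.
 */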
   15366 static int
   15367 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15368 {
   15369 	uint32_t ext_ctrl;
	int timeout;
   15371 
   15372 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15373 		device_xname(sc->sc_dev), __func__));
   15374 
   15375 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15376 	for (timeout = 0; timeout < 200; timeout++) {
   15377 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15378 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15379 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15380 
   15381 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15382 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15383 			return 0;
   15384 		delay(5000);
   15385 	}
   15386 	device_printf(sc->sc_dev,
   15387 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15388 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15389 	return -1;
   15390 }
   15391 
   15392 static void
   15393 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15394 {
   15395 	uint32_t ext_ctrl;
   15396 
   15397 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15398 		device_xname(sc->sc_dev), __func__));
   15399 
   15400 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15401 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15402 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15403 
   15404 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15405 }
   15406 
   15407 static int
   15408 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15409 {
   15410 	uint32_t ext_ctrl;
   15411 	int timeout;
   15412 
   15413 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15414 		device_xname(sc->sc_dev), __func__));
   15415 	mutex_enter(sc->sc_ich_phymtx);
   15416 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15417 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15418 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15419 			break;
   15420 		delay(1000);
   15421 	}
   15422 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15423 		device_printf(sc->sc_dev,
   15424 		    "SW has already locked the resource\n");
   15425 		goto out;
   15426 	}
   15427 
   15428 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15429 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15430 	for (timeout = 0; timeout < 1000; timeout++) {
   15431 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15432 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15433 			break;
   15434 		delay(1000);
   15435 	}
   15436 	if (timeout >= 1000) {
   15437 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15438 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15439 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15440 		goto out;
   15441 	}
   15442 	return 0;
   15443 
   15444 out:
   15445 	mutex_exit(sc->sc_ich_phymtx);
   15446 	return -1;
   15447 }
   15448 
   15449 static void
   15450 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15451 {
   15452 	uint32_t ext_ctrl;
   15453 
   15454 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15455 		device_xname(sc->sc_dev), __func__));
   15456 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15457 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15458 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15459 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15460 	} else
   15461 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15462 
   15463 	mutex_exit(sc->sc_ich_phymtx);
   15464 }
   15465 
   15466 static int
   15467 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15468 {
   15469 
   15470 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15471 		device_xname(sc->sc_dev), __func__));
   15472 	mutex_enter(sc->sc_ich_nvmmtx);
   15473 
   15474 	return 0;
   15475 }
   15476 
   15477 static void
   15478 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15479 {
   15480 
   15481 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15482 		device_xname(sc->sc_dev), __func__));
   15483 	mutex_exit(sc->sc_ich_nvmmtx);
   15484 }
   15485 
   15486 static int
   15487 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15488 {
   15489 	int i = 0;
   15490 	uint32_t reg;
   15491 
   15492 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15493 		device_xname(sc->sc_dev), __func__));
   15494 
   15495 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15496 	do {
   15497 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15498 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15499 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15500 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15501 			break;
   15502 		delay(2*1000);
   15503 		i++;
   15504 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15505 
   15506 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15507 		wm_put_hw_semaphore_82573(sc);
   15508 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15509 		    device_xname(sc->sc_dev));
   15510 		return -1;
   15511 	}
   15512 
   15513 	return 0;
   15514 }
   15515 
   15516 static void
   15517 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15518 {
   15519 	uint32_t reg;
   15520 
   15521 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15522 		device_xname(sc->sc_dev), __func__));
   15523 
   15524 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15525 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15526 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15527 }
   15528 
   15529 /*
   15530  * Management mode and power management related subroutines.
   15531  * BMC, AMT, suspend/resume and EEE.
   15532  */
   15533 
   15534 #ifdef WM_WOL
   15535 static int
   15536 wm_check_mng_mode(struct wm_softc *sc)
   15537 {
   15538 	int rv;
   15539 
   15540 	switch (sc->sc_type) {
   15541 	case WM_T_ICH8:
   15542 	case WM_T_ICH9:
   15543 	case WM_T_ICH10:
   15544 	case WM_T_PCH:
   15545 	case WM_T_PCH2:
   15546 	case WM_T_PCH_LPT:
   15547 	case WM_T_PCH_SPT:
   15548 	case WM_T_PCH_CNP:
   15549 		rv = wm_check_mng_mode_ich8lan(sc);
   15550 		break;
   15551 	case WM_T_82574:
   15552 	case WM_T_82583:
   15553 		rv = wm_check_mng_mode_82574(sc);
   15554 		break;
   15555 	case WM_T_82571:
   15556 	case WM_T_82572:
   15557 	case WM_T_82573:
   15558 	case WM_T_80003:
   15559 		rv = wm_check_mng_mode_generic(sc);
   15560 		break;
   15561 	default:
		/* Nothing to do */
   15563 		rv = 0;
   15564 		break;
   15565 	}
   15566 
   15567 	return rv;
   15568 }
   15569 
   15570 static int
   15571 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15572 {
   15573 	uint32_t fwsm;
   15574 
   15575 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15576 
   15577 	if (((fwsm & FWSM_FW_VALID) != 0)
   15578 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15579 		return 1;
   15580 
   15581 	return 0;
   15582 }
   15583 
   15584 static int
   15585 wm_check_mng_mode_82574(struct wm_softc *sc)
   15586 {
   15587 	uint16_t data;
   15588 
   15589 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15590 
   15591 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15592 		return 1;
   15593 
   15594 	return 0;
   15595 }
   15596 
   15597 static int
   15598 wm_check_mng_mode_generic(struct wm_softc *sc)
   15599 {
   15600 	uint32_t fwsm;
   15601 
   15602 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15603 
   15604 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15605 		return 1;
   15606 
   15607 	return 0;
   15608 }
   15609 #endif /* WM_WOL */
   15610 
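/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if the manageability firmware needs management frames
 *	to be passed through to the host, 0 otherwise.
 */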
   15611 static int
   15612 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15613 {
   15614 	uint32_t manc, fwsm, factps;
   15615 
   15616 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15617 		return 0;
   15618 
   15619 	manc = CSR_READ(sc, WMREG_MANC);
   15620 
   15621 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15622 		device_xname(sc->sc_dev), manc));
   15623 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15624 		return 0;
   15625 
   15626 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15627 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15628 		factps = CSR_READ(sc, WMREG_FACTPS);
   15629 		if (((factps & FACTPS_MNGCG) == 0)
   15630 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15631 			return 1;
   15632 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15633 		uint16_t data;
   15634 
   15635 		factps = CSR_READ(sc, WMREG_FACTPS);
   15636 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15637 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15638 			device_xname(sc->sc_dev), factps, data));
   15639 		if (((factps & FACTPS_MNGCG) == 0)
   15640 		    && ((data & NVM_CFG2_MNGM_MASK)
   15641 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15642 			return 1;
   15643 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15644 	    && ((manc & MANC_ASF_EN) == 0))
   15645 		return 1;
   15646 
   15647 	return 0;
   15648 }
   15649 
   15650 static bool
   15651 wm_phy_resetisblocked(struct wm_softc *sc)
   15652 {
   15653 	bool blocked = false;
   15654 	uint32_t reg;
   15655 	int i = 0;
   15656 
   15657 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15658 		device_xname(sc->sc_dev), __func__));
   15659 
   15660 	switch (sc->sc_type) {
   15661 	case WM_T_ICH8:
   15662 	case WM_T_ICH9:
   15663 	case WM_T_ICH10:
   15664 	case WM_T_PCH:
   15665 	case WM_T_PCH2:
   15666 	case WM_T_PCH_LPT:
   15667 	case WM_T_PCH_SPT:
   15668 	case WM_T_PCH_CNP:
   15669 		do {
   15670 			reg = CSR_READ(sc, WMREG_FWSM);
   15671 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15672 				blocked = true;
   15673 				delay(10*1000);
   15674 				continue;
   15675 			}
   15676 			blocked = false;
   15677 		} while (blocked && (i++ < 30));
   15678 		return blocked;
   15680 	case WM_T_82571:
   15681 	case WM_T_82572:
   15682 	case WM_T_82573:
   15683 	case WM_T_82574:
   15684 	case WM_T_82583:
   15685 	case WM_T_80003:
   15686 		reg = CSR_READ(sc, WMREG_MANC);
   15687 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15688 			return true;
   15689 		else
   15690 			return false;
   15692 	default:
   15693 		/* No problem */
   15694 		break;
   15695 	}
   15696 
   15697 	return false;
   15698 }
   15699 
   15700 static void
   15701 wm_get_hw_control(struct wm_softc *sc)
   15702 {
   15703 	uint32_t reg;
   15704 
   15705 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15706 		device_xname(sc->sc_dev), __func__));
   15707 
   15708 	if (sc->sc_type == WM_T_82573) {
   15709 		reg = CSR_READ(sc, WMREG_SWSM);
   15710 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15711 	} else if (sc->sc_type >= WM_T_82571) {
   15712 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15713 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15714 	}
   15715 }
   15716 
   15717 static void
   15718 wm_release_hw_control(struct wm_softc *sc)
   15719 {
   15720 	uint32_t reg;
   15721 
   15722 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15723 		device_xname(sc->sc_dev), __func__));
   15724 
   15725 	if (sc->sc_type == WM_T_82573) {
   15726 		reg = CSR_READ(sc, WMREG_SWSM);
   15727 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15728 	} else if (sc->sc_type >= WM_T_82571) {
   15729 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15730 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15731 	}
   15732 }
   15733 
   15734 static void
   15735 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15736 {
   15737 	uint32_t reg;
   15738 
   15739 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15740 		device_xname(sc->sc_dev), __func__));
   15741 
   15742 	if (sc->sc_type < WM_T_PCH2)
   15743 		return;
   15744 
   15745 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15746 
   15747 	if (gate)
   15748 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15749 	else
   15750 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15751 
   15752 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15753 }
   15754 
   15755 static int
   15756 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15757 {
   15758 	uint32_t fwsm, reg;
   15759 	int rv;
   15760 
   15761 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15762 		device_xname(sc->sc_dev), __func__));
   15763 
   15764 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15765 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15766 
   15767 	/* Disable ULP */
   15768 	wm_ulp_disable(sc);
   15769 
   15770 	/* Acquire PHY semaphore */
   15771 	rv = sc->phy.acquire(sc);
   15772 	if (rv != 0) {
   15773 		DPRINTF(sc, WM_DEBUG_INIT,
   15774 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   15775 		return rv;
   15776 	}
   15777 
   15778 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15779 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15780 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15781 	 */
   15782 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15783 	switch (sc->sc_type) {
   15784 	case WM_T_PCH_LPT:
   15785 	case WM_T_PCH_SPT:
   15786 	case WM_T_PCH_CNP:
   15787 		if (wm_phy_is_accessible_pchlan(sc))
   15788 			break;
   15789 
   15790 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15791 		 * forcing MAC to SMBus mode first.
   15792 		 */
   15793 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15794 		reg |= CTRL_EXT_FORCE_SMBUS;
   15795 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15796 #if 0
   15797 		/* XXX Isn't this required??? */
   15798 		CSR_WRITE_FLUSH(sc);
   15799 #endif
   15800 		/* Wait 50 milliseconds for MAC to finish any retries
   15801 		 * that it might be trying to perform from previous
   15802 		 * attempts to acknowledge any phy read requests.
   15803 		 */
   15804 		delay(50 * 1000);
   15805 		/* FALLTHROUGH */
   15806 	case WM_T_PCH2:
   15807 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15808 			break;
   15809 		/* FALLTHROUGH */
   15810 	case WM_T_PCH:
   15811 		if (sc->sc_type == WM_T_PCH)
   15812 			if ((fwsm & FWSM_FW_VALID) != 0)
   15813 				break;
   15814 
   15815 		if (wm_phy_resetisblocked(sc) == true) {
   15816 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15817 			break;
   15818 		}
   15819 
   15820 		/* Toggle LANPHYPC Value bit */
   15821 		wm_toggle_lanphypc_pch_lpt(sc);
   15822 
   15823 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15824 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15825 				break;
   15826 
   15827 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15828 			 * so ensure that the MAC is also out of SMBus mode
   15829 			 */
   15830 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15831 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15832 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15833 
   15834 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15835 				break;
   15836 			rv = -1;
   15837 		}
   15838 		break;
   15839 	default:
   15840 		break;
   15841 	}
   15842 
   15843 	/* Release semaphore */
   15844 	sc->phy.release(sc);
   15845 
   15846 	if (rv == 0) {
   15847 		/* Check to see if able to reset PHY.  Print error if not */
   15848 		if (wm_phy_resetisblocked(sc)) {
   15849 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15850 			goto out;
   15851 		}
   15852 
   15853 		/* Reset the PHY before any access to it.  Doing so, ensures
   15854 		 * that the PHY is in a known good state before we read/write
   15855 		 * PHY registers.  The generic reset is sufficient here,
   15856 		 * because we haven't determined the PHY type yet.
   15857 		 */
   15858 		if (wm_reset_phy(sc) != 0)
   15859 			goto out;
   15860 
   15861 		/* On a successful reset, possibly need to wait for the PHY
   15862 		 * to quiesce to an accessible state before returning control
   15863 		 * to the calling function.  If the PHY does not quiesce, then
   15864 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
   15866 		 */
   15867 		if (wm_phy_resetisblocked(sc))
   15868 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15869 	}
   15870 
   15871 out:
   15872 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15873 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15874 		delay(10*1000);
   15875 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15876 	}
   15877 
	return rv;
   15879 }
   15880 
   15881 static void
   15882 wm_init_manageability(struct wm_softc *sc)
   15883 {
   15884 
   15885 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15886 		device_xname(sc->sc_dev), __func__));
   15887 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15888 
   15889 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15890 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15891 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15892 
   15893 		/* Disable hardware interception of ARP */
   15894 		manc &= ~MANC_ARP_EN;
   15895 
   15896 		/* Enable receiving management packets to the host */
   15897 		if (sc->sc_type >= WM_T_82571) {
   15898 			manc |= MANC_EN_MNG2HOST;
   15899 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15900 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15901 		}
   15902 
   15903 		CSR_WRITE(sc, WMREG_MANC, manc);
   15904 	}
   15905 }
   15906 
   15907 static void
   15908 wm_release_manageability(struct wm_softc *sc)
   15909 {
   15910 
   15911 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15912 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15913 
   15914 		manc |= MANC_ARP_EN;
   15915 		if (sc->sc_type >= WM_T_82571)
   15916 			manc &= ~MANC_EN_MNG2HOST;
   15917 
   15918 		CSR_WRITE(sc, WMREG_MANC, manc);
   15919 	}
   15920 }
   15921 
   15922 static void
   15923 wm_get_wakeup(struct wm_softc *sc)
   15924 {
   15925 
   15926 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15927 	switch (sc->sc_type) {
   15928 	case WM_T_82573:
   15929 	case WM_T_82583:
   15930 		sc->sc_flags |= WM_F_HAS_AMT;
   15931 		/* FALLTHROUGH */
   15932 	case WM_T_80003:
   15933 	case WM_T_82575:
   15934 	case WM_T_82576:
   15935 	case WM_T_82580:
   15936 	case WM_T_I350:
   15937 	case WM_T_I354:
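          		/*
          		 * A nonzero firmware mode in FWSM indicates that the
          		 * management (ARC) subsystem is present, hence the
          		 * ARC_SUBSYS_VALID flag.
          		 */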
   15938 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15939 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15940 		/* FALLTHROUGH */
   15941 	case WM_T_82541:
   15942 	case WM_T_82541_2:
   15943 	case WM_T_82547:
   15944 	case WM_T_82547_2:
   15945 	case WM_T_82571:
   15946 	case WM_T_82572:
   15947 	case WM_T_82574:
   15948 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15949 		break;
   15950 	case WM_T_ICH8:
   15951 	case WM_T_ICH9:
   15952 	case WM_T_ICH10:
   15953 	case WM_T_PCH:
   15954 	case WM_T_PCH2:
   15955 	case WM_T_PCH_LPT:
   15956 	case WM_T_PCH_SPT:
   15957 	case WM_T_PCH_CNP:
   15958 		sc->sc_flags |= WM_F_HAS_AMT;
   15959 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15960 		break;
   15961 	default:
   15962 		break;
   15963 	}
   15964 
   15965 	/* 1: HAS_MANAGE */
   15966 	if (wm_enable_mng_pass_thru(sc) != 0)
   15967 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15968 
    15969 	/*
    15970 	 * Note that the WOL flag is set after the EEPROM reset code has
    15971 	 * run.
    15972 	 */
   15973 }
   15974 
   15975 /*
   15976  * Unconfigure Ultra Low Power mode.
   15977  * Only for I217 and newer (see below).
   15978  */
   15979 static int
   15980 wm_ulp_disable(struct wm_softc *sc)
   15981 {
   15982 	uint32_t reg;
   15983 	uint16_t phyreg;
   15984 	int i = 0, rv;
   15985 
   15986 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15987 		device_xname(sc->sc_dev), __func__));
   15988 	/* Exclude old devices */
   15989 	if ((sc->sc_type < WM_T_PCH_LPT)
   15990 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15991 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15992 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15993 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15994 		return 0;
   15995 
   15996 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15997 		/* Request ME un-configure ULP mode in the PHY */
   15998 		reg = CSR_READ(sc, WMREG_H2ME);
   15999 		reg &= ~H2ME_ULP;
   16000 		reg |= H2ME_ENFORCE_SETTINGS;
   16001 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16002 
    16003 		/* Poll up to 300msec (30 * 10ms) for ME to clear ULP_CFG_DONE. */
   16004 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   16005 			if (i++ == 30) {
   16006 				device_printf(sc->sc_dev, "%s timed out\n",
   16007 				    __func__);
   16008 				return -1;
   16009 			}
   16010 			delay(10 * 1000);
   16011 		}
   16012 		reg = CSR_READ(sc, WMREG_H2ME);
   16013 		reg &= ~H2ME_ENFORCE_SETTINGS;
   16014 		CSR_WRITE(sc, WMREG_H2ME, reg);
   16015 
   16016 		return 0;
   16017 	}
   16018 
   16019 	/* Acquire semaphore */
   16020 	rv = sc->phy.acquire(sc);
   16021 	if (rv != 0) {
   16022 		DPRINTF(sc, WM_DEBUG_INIT,
   16023 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
   16024 		return rv;
   16025 	}
   16026 
   16027 	/* Toggle LANPHYPC */
   16028 	wm_toggle_lanphypc_pch_lpt(sc);
   16029 
   16030 	/* Unforce SMBus mode in PHY */
   16031 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   16032 	if (rv != 0) {
   16033 		uint32_t reg2;
   16034 
   16035 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   16036 		    __func__);
   16037 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   16038 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   16039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   16040 		delay(50 * 1000);
   16041 
   16042 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   16043 		    &phyreg);
   16044 		if (rv != 0)
   16045 			goto release;
   16046 	}
   16047 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16048 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   16049 
   16050 	/* Unforce SMBus mode in MAC */
   16051 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16052 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   16053 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16054 
   16055 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   16056 	if (rv != 0)
   16057 		goto release;
   16058 	phyreg |= HV_PM_CTRL_K1_ENA;
   16059 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   16060 
   16061 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   16062 	    &phyreg);
   16063 	if (rv != 0)
   16064 		goto release;
   16065 	phyreg &= ~(I218_ULP_CONFIG1_IND
   16066 	    | I218_ULP_CONFIG1_STICKY_ULP
   16067 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   16068 	    | I218_ULP_CONFIG1_WOL_HOST
   16069 	    | I218_ULP_CONFIG1_INBAND_EXIT
   16070 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   16071 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   16072 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   16073 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
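          	/* Commit the ULP changes by starting auto ULP configuration */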
   16074 	phyreg |= I218_ULP_CONFIG1_START;
   16075 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   16076 
   16077 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16078 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   16079 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16080 
   16081 release:
   16082 	/* Release semaphore */
   16083 	sc->phy.release(sc);
   16084 	wm_gmii_reset(sc);
   16085 	delay(50 * 1000);
   16086 
   16087 	return rv;
   16088 }
   16089 
   16090 /* WOL in the newer chipset interfaces (pchlan) */
   16091 static int
   16092 wm_enable_phy_wakeup(struct wm_softc *sc)
   16093 {
   16094 	device_t dev = sc->sc_dev;
   16095 	uint32_t mreg, moff;
   16096 	uint16_t wuce, wuc, wufc, preg;
   16097 	int i, rv;
   16098 
   16099 	KASSERT(sc->sc_type >= WM_T_PCH);
   16100 
   16101 	/* Copy MAC RARs to PHY RARs */
   16102 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   16103 
   16104 	/* Activate PHY wakeup */
   16105 	rv = sc->phy.acquire(sc);
   16106 	if (rv != 0) {
   16107 		device_printf(dev, "%s: failed to acquire semaphore\n",
   16108 		    __func__);
   16109 		return rv;
   16110 	}
   16111 
   16112 	/*
   16113 	 * Enable access to PHY wakeup registers.
   16114 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   16115 	 */
   16116 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   16117 	if (rv != 0) {
   16118 		device_printf(dev,
   16119 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   16120 		goto release;
   16121 	}
   16122 
   16123 	/* Copy MAC MTA to PHY MTA */
   16124 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   16125 		uint16_t lo, hi;
   16126 
   16127 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   16128 		lo = (uint16_t)(mreg & 0xffff);
   16129 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   16130 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   16131 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   16132 	}
   16133 
   16134 	/* Configure PHY Rx Control register */
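          	/*
          	 * Mirror the MAC's Rx filter settings into BM_RCTL so the
          	 * PHY can apply the same filtering to wake packets.
          	 */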
   16135 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   16136 	mreg = CSR_READ(sc, WMREG_RCTL);
   16137 	if (mreg & RCTL_UPE)
   16138 		preg |= BM_RCTL_UPE;
   16139 	if (mreg & RCTL_MPE)
   16140 		preg |= BM_RCTL_MPE;
   16141 	preg &= ~(BM_RCTL_MO_MASK);
   16142 	moff = __SHIFTOUT(mreg, RCTL_MO);
   16143 	if (moff != 0)
   16144 		preg |= moff << BM_RCTL_MO_SHIFT;
   16145 	if (mreg & RCTL_BAM)
   16146 		preg |= BM_RCTL_BAM;
   16147 	if (mreg & RCTL_PMCF)
   16148 		preg |= BM_RCTL_PMCF;
   16149 	mreg = CSR_READ(sc, WMREG_CTRL);
   16150 	if (mreg & CTRL_RFCE)
   16151 		preg |= BM_RCTL_RFCE;
   16152 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   16153 
   16154 	wuc = WUC_APME | WUC_PME_EN;
   16155 	wufc = WUFC_MAG;
   16156 	/* Enable PHY wakeup in MAC register */
   16157 	CSR_WRITE(sc, WMREG_WUC,
   16158 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   16159 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   16160 
   16161 	/* Configure and enable PHY wakeup in PHY registers */
   16162 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   16163 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   16164 
   16165 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   16166 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16167 
   16168 release:
   16169 	sc->phy.release(sc);
   16170 
    16171 	return rv;
   16172 }
   16173 
   16174 /* Power down workaround on D3 */
   16175 static void
   16176 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   16177 {
   16178 	uint32_t reg;
   16179 	uint16_t phyreg;
   16180 	int i;
   16181 
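          	/*
          	 * Try the shutdown sequence up to twice: if the setting does
          	 * not read back as MODE_SHUTDOWN, reset the PHY and retry
          	 * once.
          	 */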
   16182 	for (i = 0; i < 2; i++) {
   16183 		/* Disable link */
   16184 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16185 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16186 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16187 
   16188 		/*
   16189 		 * Call gig speed drop workaround on Gig disable before
   16190 		 * accessing any PHY registers
   16191 		 */
   16192 		if (sc->sc_type == WM_T_ICH8)
   16193 			wm_gig_downshift_workaround_ich8lan(sc);
   16194 
   16195 		/* Write VR power-down enable */
   16196 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16197 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16198 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   16199 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   16200 
   16201 		/* Read it back and test */
   16202 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   16203 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   16204 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   16205 			break;
   16206 
   16207 		/* Issue PHY reset and repeat at most one more time */
   16208 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   16209 	}
   16210 }
   16211 
   16212 /*
   16213  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   16214  *  @sc: pointer to the HW structure
   16215  *
   16216  *  During S0 to Sx transition, it is possible the link remains at gig
   16217  *  instead of negotiating to a lower speed.  Before going to Sx, set
   16218  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16219  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16220  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16221  *  needs to be written.
    16222  *  Parts that support (and are linked to a partner which supports) EEE in
   16223  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16224  *  than 10Mbps w/o EEE.
   16225  */
   16226 static void
   16227 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16228 {
   16229 	device_t dev = sc->sc_dev;
   16230 	struct ethercom *ec = &sc->sc_ethercom;
   16231 	uint32_t phy_ctrl;
   16232 	int rv;
   16233 
   16234 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16235 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16236 
   16237 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16238 
   16239 	if (sc->sc_phytype == WMPHY_I217) {
   16240 		uint16_t devid = sc->sc_pcidevid;
   16241 
   16242 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16243 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16244 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16245 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16246 		    (sc->sc_type >= WM_T_PCH_SPT))
   16247 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16248 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16249 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16250 
   16251 		if (sc->phy.acquire(sc) != 0)
   16252 			goto out;
   16253 
   16254 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16255 			uint16_t eee_advert;
   16256 
   16257 			rv = wm_read_emi_reg_locked(dev,
   16258 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16259 			if (rv)
   16260 				goto release;
   16261 
   16262 			/*
   16263 			 * Disable LPLU if both link partners support 100BaseT
   16264 			 * EEE and 100Full is advertised on both ends of the
   16265 			 * link, and enable Auto Enable LPI since there will
   16266 			 * be no driver to enable LPI while in Sx.
   16267 			 */
   16268 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16269 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16270 				uint16_t anar, phy_reg;
   16271 
   16272 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16273 				    &anar);
   16274 				if (anar & ANAR_TX_FD) {
   16275 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16276 					    PHY_CTRL_NOND0A_LPLU);
   16277 
   16278 					/* Set Auto Enable LPI after link up */
   16279 					sc->phy.readreg_locked(dev, 2,
   16280 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16281 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16282 					sc->phy.writereg_locked(dev, 2,
   16283 					    I217_LPI_GPIO_CTRL, phy_reg);
   16284 				}
   16285 			}
   16286 		}
   16287 
   16288 		/*
   16289 		 * For i217 Intel Rapid Start Technology support,
   16290 		 * when the system is going into Sx and no manageability engine
   16291 		 * is present, the driver must configure proxy to reset only on
    16292 		 * power good.  LPI (Low Power Idle) state must also reset only
   16293 		 * on power good, as well as the MTA (Multicast table array).
   16294 		 * The SMBus release must also be disabled on LCD reset.
   16295 		 */
   16296 
   16297 		/*
    16298 		 * Enable MTA to reset for Intel Rapid Start Technology
    16299 		 * Support (not implemented here)
   16300 		 */
   16301 
   16302 release:
   16303 		sc->phy.release(sc);
   16304 	}
   16305 out:
   16306 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16307 
   16308 	if (sc->sc_type == WM_T_ICH8)
   16309 		wm_gig_downshift_workaround_ich8lan(sc);
   16310 
   16311 	if (sc->sc_type >= WM_T_PCH) {
   16312 		wm_oem_bits_config_ich8lan(sc, false);
   16313 
   16314 		/* Reset PHY to activate OEM bits on 82577/8 */
   16315 		if (sc->sc_type == WM_T_PCH)
   16316 			wm_reset_phy(sc);
   16317 
   16318 		if (sc->phy.acquire(sc) != 0)
   16319 			return;
   16320 		wm_write_smbus_addr(sc);
   16321 		sc->phy.release(sc);
   16322 	}
   16323 }
   16324 
   16325 /*
   16326  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16327  *  @sc: pointer to the HW structure
   16328  *
   16329  *  During Sx to S0 transitions on non-managed devices or managed devices
   16330  *  on which PHY resets are not blocked, if the PHY registers cannot be
    16331  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16332  *  the PHY.
   16333  *  On i217, setup Intel Rapid Start Technology.
   16334  */
   16335 static int
   16336 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16337 {
   16338 	device_t dev = sc->sc_dev;
   16339 	int rv;
   16340 
   16341 	if (sc->sc_type < WM_T_PCH2)
   16342 		return 0;
   16343 
   16344 	rv = wm_init_phy_workarounds_pchlan(sc);
   16345 	if (rv != 0)
   16346 		return rv;
   16347 
   16348 	/* For i217 Intel Rapid Start Technology support when the system
    16349 	 * is transitioning from Sx and no manageability engine is present,
   16350 	 * configure SMBus to restore on reset, disable proxy, and enable
   16351 	 * the reset on MTA (Multicast table array).
   16352 	 */
   16353 	if (sc->sc_phytype == WMPHY_I217) {
   16354 		uint16_t phy_reg;
   16355 
   16356 		rv = sc->phy.acquire(sc);
   16357 		if (rv != 0)
   16358 			return rv;
   16359 
   16360 		/* Clear Auto Enable LPI after link up */
   16361 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16362 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16363 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16364 
   16365 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16366 			/* Restore clear on SMB if no manageability engine
   16367 			 * is present
   16368 			 */
   16369 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16370 			    &phy_reg);
   16371 			if (rv != 0)
   16372 				goto release;
   16373 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16374 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16375 
   16376 			/* Disable Proxy */
   16377 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16378 		}
   16379 		/* Enable reset on MTA */
    16380 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16381 		if (rv != 0)
   16382 			goto release;
   16383 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16384 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16385 
   16386 release:
   16387 		sc->phy.release(sc);
   16388 		return rv;
   16389 	}
   16390 
   16391 	return 0;
   16392 }
   16393 
   16394 static void
   16395 wm_enable_wakeup(struct wm_softc *sc)
   16396 {
   16397 	uint32_t reg, pmreg;
   16398 	pcireg_t pmode;
   16399 	int rv = 0;
   16400 
   16401 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16402 		device_xname(sc->sc_dev), __func__));
   16403 
   16404 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16405 	    &pmreg, NULL) == 0)
   16406 		return;
   16407 
   16408 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16409 		goto pme;
   16410 
   16411 	/* Advertise the wakeup capability */
   16412 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16413 	    | CTRL_SWDPIN(3));
   16414 
   16415 	/* Keep the laser running on fiber adapters */
   16416 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16417 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16419 		reg |= CTRL_EXT_SWDPIN(3);
   16420 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16421 	}
   16422 
   16423 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16424 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16425 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16426 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16427 		wm_suspend_workarounds_ich8lan(sc);
   16428 
   16429 #if 0	/* For the multicast packet */
   16430 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16431 	reg |= WUFC_MC;
   16432 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16433 #endif
   16434 
   16435 	if (sc->sc_type >= WM_T_PCH) {
   16436 		rv = wm_enable_phy_wakeup(sc);
   16437 		if (rv != 0)
   16438 			goto pme;
   16439 	} else {
   16440 		/* Enable wakeup by the MAC */
   16441 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16442 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16443 	}
   16444 
   16445 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16446 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16447 		|| (sc->sc_type == WM_T_PCH2))
   16448 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16449 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16450 
   16451 pme:
   16452 	/* Request PME */
   16453 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16454 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16455 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16456 		/* For WOL */
   16457 		pmode |= PCI_PMCSR_PME_EN;
   16458 	} else {
   16459 		/* Disable WOL */
   16460 		pmode &= ~PCI_PMCSR_PME_EN;
   16461 	}
   16462 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16463 }
   16464 
   16465 /* Disable ASPM L0s and/or L1 for workaround */
   16466 static void
   16467 wm_disable_aspm(struct wm_softc *sc)
   16468 {
   16469 	pcireg_t reg, mask = 0;
    16470 	const char *str = "";
   16471 
   16472 	/*
    16473 	 * Only for PCIe devices which have the PCIe capability in the
    16474 	 * PCI config space.
   16475 	 */
   16476 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16477 		return;
   16478 
   16479 	switch (sc->sc_type) {
   16480 	case WM_T_82571:
   16481 	case WM_T_82572:
   16482 		/*
   16483 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16484 		 * State Power management L1 State (ASPM L1).
   16485 		 */
   16486 		mask = PCIE_LCSR_ASPM_L1;
   16487 		str = "L1 is";
   16488 		break;
   16489 	case WM_T_82573:
   16490 	case WM_T_82574:
   16491 	case WM_T_82583:
   16492 		/*
   16493 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16494 		 *
    16495 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    16496 		 * some chipsets.  The 82574 and 82583 documents say that
    16497 		 * disabling L0s with those specific chipsets is sufficient,
    16498 		 * but we follow what the Intel em driver does.
   16499 		 *
   16500 		 * References:
   16501 		 * Errata 8 of the Specification Update of i82573.
   16502 		 * Errata 20 of the Specification Update of i82574.
   16503 		 * Errata 9 of the Specification Update of i82583.
   16504 		 */
   16505 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16506 		str = "L0s and L1 are";
   16507 		break;
   16508 	default:
   16509 		return;
   16510 	}
   16511 
   16512 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16513 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16514 	reg &= ~mask;
   16515 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16516 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16517 
   16518 	/* Print only in wm_attach() */
   16519 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16520 		aprint_verbose_dev(sc->sc_dev,
   16521 		    "ASPM %s disabled to workaround the errata.\n", str);
   16522 }
   16523 
   16524 /* LPLU */
   16525 
   16526 static void
   16527 wm_lplu_d0_disable(struct wm_softc *sc)
   16528 {
   16529 	struct mii_data *mii = &sc->sc_mii;
   16530 	uint32_t reg;
   16531 	uint16_t phyval;
   16532 
   16533 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16534 		device_xname(sc->sc_dev), __func__));
   16535 
   16536 	if (sc->sc_phytype == WMPHY_IFE)
   16537 		return;
   16538 
   16539 	switch (sc->sc_type) {
   16540 	case WM_T_82571:
   16541 	case WM_T_82572:
   16542 	case WM_T_82573:
   16543 	case WM_T_82575:
   16544 	case WM_T_82576:
   16545 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16546 		phyval &= ~PMR_D0_LPLU;
   16547 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16548 		break;
   16549 	case WM_T_82580:
   16550 	case WM_T_I350:
   16551 	case WM_T_I210:
   16552 	case WM_T_I211:
   16553 		reg = CSR_READ(sc, WMREG_PHPM);
   16554 		reg &= ~PHPM_D0A_LPLU;
   16555 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16556 		break;
   16557 	case WM_T_82574:
   16558 	case WM_T_82583:
   16559 	case WM_T_ICH8:
   16560 	case WM_T_ICH9:
   16561 	case WM_T_ICH10:
   16562 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16563 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16564 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16565 		CSR_WRITE_FLUSH(sc);
   16566 		break;
   16567 	case WM_T_PCH:
   16568 	case WM_T_PCH2:
   16569 	case WM_T_PCH_LPT:
   16570 	case WM_T_PCH_SPT:
   16571 	case WM_T_PCH_CNP:
   16572 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16573 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16574 		if (wm_phy_resetisblocked(sc) == false)
   16575 			phyval |= HV_OEM_BITS_ANEGNOW;
   16576 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16577 		break;
   16578 	default:
   16579 		break;
   16580 	}
   16581 }
   16582 
   16583 /* EEE */
   16584 
   16585 static int
   16586 wm_set_eee_i350(struct wm_softc *sc)
   16587 {
   16588 	struct ethercom *ec = &sc->sc_ethercom;
   16589 	uint32_t ipcnfg, eeer;
   16590 	uint32_t ipcnfg_mask
   16591 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16592 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16593 
   16594 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16595 
   16596 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16597 	eeer = CSR_READ(sc, WMREG_EEER);
   16598 
   16599 	/* Enable or disable per user setting */
   16600 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16601 		ipcnfg |= ipcnfg_mask;
   16602 		eeer |= eeer_mask;
   16603 	} else {
   16604 		ipcnfg &= ~ipcnfg_mask;
   16605 		eeer &= ~eeer_mask;
   16606 	}
   16607 
   16608 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16609 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16610 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16611 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16612 
   16613 	return 0;
   16614 }
   16615 
   16616 static int
   16617 wm_set_eee_pchlan(struct wm_softc *sc)
   16618 {
   16619 	device_t dev = sc->sc_dev;
   16620 	struct ethercom *ec = &sc->sc_ethercom;
   16621 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16622 	int rv;
   16623 
   16624 	switch (sc->sc_phytype) {
   16625 	case WMPHY_82579:
   16626 		lpa = I82579_EEE_LP_ABILITY;
   16627 		pcs_status = I82579_EEE_PCS_STATUS;
   16628 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16629 		break;
   16630 	case WMPHY_I217:
   16631 		lpa = I217_EEE_LP_ABILITY;
   16632 		pcs_status = I217_EEE_PCS_STATUS;
   16633 		adv_addr = I217_EEE_ADVERTISEMENT;
   16634 		break;
   16635 	default:
   16636 		return 0;
   16637 	}
   16638 
   16639 	rv = sc->phy.acquire(sc);
   16640 	if (rv != 0) {
   16641 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16642 		return rv;
   16643 	}
   16644 
   16645 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16646 	if (rv != 0)
   16647 		goto release;
   16648 
   16649 	/* Clear bits that enable EEE in various speeds */
   16650 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16651 
   16652 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16653 		/* Save off link partner's EEE ability */
   16654 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16655 		if (rv != 0)
   16656 			goto release;
   16657 
   16658 		/* Read EEE advertisement */
   16659 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16660 			goto release;
   16661 
   16662 		/*
   16663 		 * Enable EEE only for speeds in which the link partner is
   16664 		 * EEE capable and for which we advertise EEE.
   16665 		 */
   16666 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16667 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16668 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16669 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16670 			if ((data & ANLPAR_TX_FD) != 0)
   16671 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16672 			else {
   16673 				/*
   16674 				 * EEE is not supported in 100Half, so ignore
   16675 				 * partner's EEE in 100 ability if full-duplex
   16676 				 * is not advertised.
   16677 				 */
   16678 				sc->eee_lp_ability
   16679 				    &= ~AN_EEEADVERT_100_TX;
   16680 			}
   16681 		}
   16682 	}
   16683 
   16684 	if (sc->sc_phytype == WMPHY_82579) {
   16685 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16686 		if (rv != 0)
   16687 			goto release;
   16688 
   16689 		data &= ~I82579_LPI_PLL_SHUT_100;
   16690 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16691 	}
   16692 
   16693 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16694 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16695 		goto release;
   16696 
   16697 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16698 release:
   16699 	sc->phy.release(sc);
   16700 
   16701 	return rv;
   16702 }
   16703 
   16704 static int
   16705 wm_set_eee(struct wm_softc *sc)
   16706 {
   16707 	struct ethercom *ec = &sc->sc_ethercom;
   16708 
   16709 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16710 		return 0;
   16711 
   16712 	if (sc->sc_type == WM_T_I354) {
   16713 		/* I354 uses an external PHY */
   16714 		return 0; /* not yet */
   16715 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16716 		return wm_set_eee_i350(sc);
   16717 	else if (sc->sc_type >= WM_T_PCH2)
   16718 		return wm_set_eee_pchlan(sc);
   16719 
   16720 	return 0;
   16721 }
   16722 
   16723 /*
   16724  * Workarounds (mainly PHY related).
   16725  * Basically, PHY's workarounds are in the PHY drivers.
   16726  */
   16727 
   16728 /* Workaround for 82566 Kumeran PCS lock loss */
   16729 static int
   16730 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16731 {
   16732 	struct mii_data *mii = &sc->sc_mii;
   16733 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16734 	int i, reg, rv;
   16735 	uint16_t phyreg;
   16736 
   16737 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16738 		device_xname(sc->sc_dev), __func__));
   16739 
   16740 	/* If the link is not up, do nothing */
   16741 	if ((status & STATUS_LU) == 0)
   16742 		return 0;
   16743 
    16744 	/* Nothing to do if the link speed is other than 1Gbps */
   16745 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16746 		return 0;
   16747 
   16748 	for (i = 0; i < 10; i++) {
    16749 		/* Read twice (presumably latched bits; 2nd read is current) */
   16750 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16751 		if (rv != 0)
   16752 			return rv;
   16753 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16754 		if (rv != 0)
   16755 			return rv;
   16756 
   16757 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16758 			goto out;	/* GOOD! */
   16759 
   16760 		/* Reset the PHY */
   16761 		wm_reset_phy(sc);
   16762 		delay(5*1000);
   16763 	}
   16764 
   16765 	/* Disable GigE link negotiation */
   16766 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16767 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16768 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16769 
   16770 	/*
   16771 	 * Call gig speed drop workaround on Gig disable before accessing
   16772 	 * any PHY registers.
   16773 	 */
   16774 	wm_gig_downshift_workaround_ich8lan(sc);
   16775 
   16776 out:
   16777 	return 0;
   16778 }
   16779 
   16780 /*
   16781  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16782  *  @sc: pointer to the HW structure
   16783  *
    16784  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16785  *  LPLU, Gig disable, MDIC PHY reset):
   16786  *    1) Set Kumeran Near-end loopback
   16787  *    2) Clear Kumeran Near-end loopback
   16788  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16789  */
   16790 static void
   16791 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16792 {
   16793 	uint16_t kmreg;
   16794 
   16795 	/* Only for igp3 */
   16796 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16797 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16798 			return;
   16799 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16800 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16801 			return;
   16802 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16803 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16804 	}
   16805 }
   16806 
   16807 /*
   16808  * Workaround for pch's PHYs
   16809  * XXX should be moved to new PHY driver?
   16810  */
   16811 static int
   16812 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16813 {
   16814 	device_t dev = sc->sc_dev;
   16815 	struct mii_data *mii = &sc->sc_mii;
   16816 	struct mii_softc *child;
   16817 	uint16_t phy_data, phyrev = 0;
   16818 	int phytype = sc->sc_phytype;
   16819 	int rv;
   16820 
   16821 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16822 		device_xname(dev), __func__));
   16823 	KASSERT(sc->sc_type == WM_T_PCH);
   16824 
   16825 	/* Set MDIO slow mode before any other MDIO access */
   16826 	if (phytype == WMPHY_82577)
   16827 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16828 			return rv;
   16829 
   16830 	child = LIST_FIRST(&mii->mii_phys);
   16831 	if (child != NULL)
   16832 		phyrev = child->mii_mpd_rev;
   16833 
    16834 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16835 	if ((child != NULL) &&
   16836 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16837 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16838 		/* Disable generation of early preamble (0x4431) */
   16839 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16840 		    &phy_data);
   16841 		if (rv != 0)
   16842 			return rv;
   16843 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16844 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16845 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16846 		    phy_data);
   16847 		if (rv != 0)
   16848 			return rv;
   16849 
   16850 		/* Preamble tuning for SSC */
   16851 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16852 		if (rv != 0)
   16853 			return rv;
   16854 	}
   16855 
   16856 	/* 82578 */
   16857 	if (phytype == WMPHY_82578) {
   16858 		/*
   16859 		 * Return registers to default by doing a soft reset then
   16860 		 * writing 0x3140 to the control register
   16861 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16862 		 */
   16863 		if ((child != NULL) && (phyrev < 2)) {
   16864 			PHY_RESET(child);
   16865 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16866 			if (rv != 0)
   16867 				return rv;
   16868 		}
   16869 	}
   16870 
   16871 	/* Select page 0 */
   16872 	if ((rv = sc->phy.acquire(sc)) != 0)
   16873 		return rv;
   16874 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16875 	sc->phy.release(sc);
   16876 	if (rv != 0)
   16877 		return rv;
   16878 
   16879 	/*
   16880 	 * Configure the K1 Si workaround during phy reset assuming there is
   16881 	 * link so that it disables K1 if link is in 1Gbps.
   16882 	 */
   16883 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16884 		return rv;
   16885 
   16886 	/* Workaround for link disconnects on a busy hub in half duplex */
   16887 	rv = sc->phy.acquire(sc);
   16888 	if (rv)
   16889 		return rv;
   16890 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16891 	if (rv)
   16892 		goto release;
   16893 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16894 	    phy_data & 0x00ff);
   16895 	if (rv)
   16896 		goto release;
   16897 
   16898 	/* Set MSE higher to enable link to stay up when noise is high */
   16899 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16900 release:
   16901 	sc->phy.release(sc);
   16902 
   16903 	return rv;
   16904 }
   16905 
   16906 /*
   16907  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16908  *  @sc:   pointer to the HW structure
   16909  */
   16910 static void
   16911 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16912 {
   16913 
   16914 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16915 		device_xname(sc->sc_dev), __func__));
   16916 
   16917 	if (sc->phy.acquire(sc) != 0)
   16918 		return;
   16919 
   16920 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16921 
   16922 	sc->phy.release(sc);
   16923 }
   16924 
   16925 static void
   16926 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16927 {
   16928 	device_t dev = sc->sc_dev;
   16929 	uint32_t mac_reg;
   16930 	uint16_t i, wuce;
   16931 	int count;
   16932 
   16933 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16934 		device_xname(dev), __func__));
   16935 
   16936 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16937 		return;
   16938 
   16939 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16940 	count = wm_rar_count(sc);
   16941 	for (i = 0; i < count; i++) {
   16942 		uint16_t lo, hi;
   16943 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16944 		lo = (uint16_t)(mac_reg & 0xffff);
   16945 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16946 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16947 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16948 
   16949 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16950 		lo = (uint16_t)(mac_reg & 0xffff);
   16951 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16952 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16953 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16954 	}
   16955 
   16956 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16957 }
   16958 
   16959 /*
   16960  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16961  *  with 82579 PHY
   16962  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16963  */
   16964 static int
   16965 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16966 {
   16967 	device_t dev = sc->sc_dev;
   16968 	int rar_count;
   16969 	int rv;
   16970 	uint32_t mac_reg;
   16971 	uint16_t dft_ctrl, data;
   16972 	uint16_t i;
   16973 
   16974 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16975 		device_xname(dev), __func__));
   16976 
   16977 	if (sc->sc_type < WM_T_PCH2)
   16978 		return 0;
   16979 
   16980 	/* Acquire PHY semaphore */
   16981 	rv = sc->phy.acquire(sc);
   16982 	if (rv != 0)
   16983 		return rv;
   16984 
   16985 	/* Disable Rx path while enabling/disabling workaround */
   16986 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16987 	if (rv != 0)
   16988 		goto out;
   16989 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16990 	    dft_ctrl | (1 << 14));
   16991 	if (rv != 0)
   16992 		goto out;
   16993 
   16994 	if (enable) {
   16995 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16996 		 * SHRAL/H) and initial CRC values to the MAC
   16997 		 */
   16998 		rar_count = wm_rar_count(sc);
   16999 		for (i = 0; i < rar_count; i++) {
   17000 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   17001 			uint32_t addr_high, addr_low;
   17002 
   17003 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   17004 			if (!(addr_high & RAL_AV))
   17005 				continue;
   17006 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   17007 			mac_addr[0] = (addr_low & 0xFF);
   17008 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   17009 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   17010 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   17011 			mac_addr[4] = (addr_high & 0xFF);
   17012 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   17013 
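          			/*
          			 * The complemented CRC32 of the address is
          			 * the "initial CRC value" mentioned above.
          			 */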
   17014 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   17015 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   17016 		}
   17017 
   17018 		/* Write Rx addresses to the PHY */
   17019 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   17020 	}
   17021 
   17022 	/*
   17023 	 * If enable ==
   17024 	 *	true: Enable jumbo frame workaround in the MAC.
   17025 	 *	false: Write MAC register values back to h/w defaults.
   17026 	 */
   17027 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   17028 	if (enable) {
   17029 		mac_reg &= ~(1 << 14);
   17030 		mac_reg |= (7 << 15);
   17031 	} else
   17032 		mac_reg &= ~(0xf << 14);
   17033 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   17034 
   17035 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   17036 	if (enable) {
   17037 		mac_reg |= RCTL_SECRC;
   17038 		sc->sc_rctl |= RCTL_SECRC;
   17039 		sc->sc_flags |= WM_F_CRC_STRIP;
   17040 	} else {
   17041 		mac_reg &= ~RCTL_SECRC;
   17042 		sc->sc_rctl &= ~RCTL_SECRC;
   17043 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   17044 	}
   17045 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   17046 
   17047 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   17048 	if (rv != 0)
   17049 		goto out;
   17050 	if (enable)
   17051 		data |= 1 << 0;
   17052 	else
   17053 		data &= ~(1 << 0);
   17054 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   17055 	if (rv != 0)
   17056 		goto out;
   17057 
   17058 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   17059 	if (rv != 0)
   17060 		goto out;
   17061 	/*
    17062 	 * XXX FreeBSD and Linux do the same thing: they set the same value
    17063 	 * in both the enable case and the disable case. Is that correct?
   17064 	 */
   17065 	data &= ~(0xf << 8);
   17066 	data |= (0xb << 8);
   17067 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   17068 	if (rv != 0)
   17069 		goto out;
   17070 
   17071 	/*
   17072 	 * If enable ==
   17073 	 *	true: Enable jumbo frame workaround in the PHY.
   17074 	 *	false: Write PHY register values back to h/w defaults.
   17075 	 */
   17076 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   17077 	if (rv != 0)
   17078 		goto out;
   17079 	data &= ~(0x7F << 5);
   17080 	if (enable)
   17081 		data |= (0x37 << 5);
   17082 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   17083 	if (rv != 0)
   17084 		goto out;
   17085 
   17086 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   17087 	if (rv != 0)
   17088 		goto out;
   17089 	if (enable)
   17090 		data &= ~(1 << 13);
   17091 	else
   17092 		data |= (1 << 13);
   17093 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   17094 	if (rv != 0)
   17095 		goto out;
   17096 
   17097 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   17098 	if (rv != 0)
   17099 		goto out;
   17100 	data &= ~(0x3FF << 2);
   17101 	if (enable)
   17102 		data |= (I82579_TX_PTR_GAP << 2);
   17103 	else
   17104 		data |= (0x8 << 2);
   17105 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   17106 	if (rv != 0)
   17107 		goto out;
   17108 
   17109 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   17110 	    enable ? 0xf100 : 0x7e00);
   17111 	if (rv != 0)
   17112 		goto out;
   17113 
   17114 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   17115 	if (rv != 0)
   17116 		goto out;
   17117 	if (enable)
   17118 		data |= 1 << 10;
   17119 	else
   17120 		data &= ~(1 << 10);
   17121 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   17122 	if (rv != 0)
   17123 		goto out;
   17124 
   17125 	/* Re-enable Rx path after enabling/disabling workaround */
   17126 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   17127 	    dft_ctrl & ~(1 << 14));
   17128 
   17129 out:
   17130 	sc->phy.release(sc);
   17131 
   17132 	return rv;
   17133 }
   17134 
   17135 /*
   17136  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   17137  *  done after every PHY reset.
   17138  */
   17139 static int
   17140 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   17141 {
   17142 	device_t dev = sc->sc_dev;
   17143 	int rv;
   17144 
   17145 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17146 		device_xname(dev), __func__));
   17147 	KASSERT(sc->sc_type == WM_T_PCH2);
   17148 
   17149 	/* Set MDIO slow mode before any other MDIO access */
   17150 	rv = wm_set_mdio_slow_mode_hv(sc);
   17151 	if (rv != 0)
   17152 		return rv;
   17153 
   17154 	rv = sc->phy.acquire(sc);
   17155 	if (rv != 0)
   17156 		return rv;
   17157 	/* Set MSE higher to enable link to stay up when noise is high */
   17158 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   17159 	if (rv != 0)
   17160 		goto release;
    17161 	/* Drop the link after the MSE threshold was reached 5 times */
   17162 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   17163 release:
   17164 	sc->phy.release(sc);
   17165 
   17166 	return rv;
   17167 }
   17168 
   17169 /**
   17170  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   17171  *  @link: link up bool flag
   17172  *
   17173  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   17174  *  preventing further DMA write requests.  Workaround the issue by disabling
    17175  *  the de-assertion of the clock request when in 1Gbps mode.
   17176  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   17177  *  speeds in order to avoid Tx hangs.
   17178  **/
   17179 static int
   17180 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   17181 {
   17182 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   17183 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   17184 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   17185 	uint16_t phyreg;
   17186 
   17187 	if (link && (speed == STATUS_SPEED_1000)) {
   17188 		int rv;
   17189 
   17190 		rv = sc->phy.acquire(sc);
   17191 		if (rv != 0)
   17192 			return rv;
   17193 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17194 		    &phyreg);
   17195 		if (rv != 0)
   17196 			goto release;
   17197 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17198 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   17199 		if (rv != 0)
   17200 			goto release;
   17201 		delay(20);
   17202 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   17203 
   17204 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   17205 		    &phyreg);
   17206 release:
   17207 		sc->phy.release(sc);
   17208 		return rv;
   17209 	}
   17210 
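          	/* Not linked at 1Gbps: stop requesting the PLL clock. */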
   17211 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   17212 
   17213 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   17214 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   17215 	    || !link
   17216 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   17217 		goto update_fextnvm6;
   17218 
   17219 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   17220 
   17221 	/* Clear link status transmit timeout */
   17222 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17223 	if (speed == STATUS_SPEED_100) {
   17224 		/* Set inband Tx timeout to 5x10us for 100Half */
   17225 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17226 
   17227 		/* Do not extend the K1 entry latency for 100Half */
   17228 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17229 	} else {
   17230 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17231 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17232 
   17233 		/* Extend the K1 entry latency for 10 Mbps */
   17234 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17235 	}
   17236 
   17237 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17238 
   17239 update_fextnvm6:
   17240 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17241 	return 0;
   17242 }
   17243 
   17244 /*
   17245  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17246  *  @sc:   pointer to the HW structure
   17247  *  @link: link up bool flag
   17248  *
   17249  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   17250  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   17251  *  If link is down, the function will restore the default K1 setting located
   17252  *  in the NVM.
   17253  */
   17254 static int
   17255 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17256 {
   17257 	int k1_enable = sc->sc_nvm_k1_enabled;
   17258 	int rv;
   17259 
   17260 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17261 		device_xname(sc->sc_dev), __func__));
   17262 
   17263 	rv = sc->phy.acquire(sc);
   17264 	if (rv != 0)
   17265 		return rv;
   17266 
   17267 	if (link) {
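          		/* Link is up: override the NVM default and disable K1 */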
   17268 		k1_enable = 0;
   17269 
   17270 		/* Link stall fix for link up */
   17271 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17272 		    0x0100);
   17273 	} else {
   17274 		/* Link stall fix for link down */
   17275 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17276 		    0x4100);
   17277 	}
   17278 
   17279 	wm_configure_k1_ich8lan(sc, k1_enable);
   17280 	sc->phy.release(sc);
   17281 
   17282 	return 0;
   17283 }
   17284 
   17285 /*
   17286  *  wm_k1_workaround_lv - K1 Si workaround
   17287  *  @sc:   pointer to the HW structure
   17288  *
   17289  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17290  *  Disable K1 for 1000 and 100 speeds
   17291  */
   17292 static int
   17293 wm_k1_workaround_lv(struct wm_softc *sc)
   17294 {
   17295 	uint32_t reg;
   17296 	uint16_t phyreg;
   17297 	int rv;
   17298 
   17299 	if (sc->sc_type != WM_T_PCH2)
   17300 		return 0;
   17301 
   17302 	/* Set K1 beacon duration based on 10Mbps speed */
   17303 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17304 	if (rv != 0)
   17305 		return rv;
   17306 
   17307 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17308 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17309 		if (phyreg &
   17310 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    17311 			/* LV 1G/100 packet drop issue workaround */
   17312 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17313 			    &phyreg);
   17314 			if (rv != 0)
   17315 				return rv;
   17316 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17317 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17318 			    phyreg);
   17319 			if (rv != 0)
   17320 				return rv;
   17321 		} else {
   17322 			/* For 10Mbps */
   17323 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17324 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17325 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17326 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17327 		}
   17328 	}
   17329 
   17330 	return 0;
   17331 }
   17332 
   17333 /*
   17334  *  wm_link_stall_workaround_hv - Si workaround
   17335  *  @sc: pointer to the HW structure
   17336  *
   17337  *  This function works around a Si bug where the link partner can get
   17338  *  a link up indication before the PHY does. If small packets are sent
   17339  *  by the link partner they can be placed in the packet buffer without
   17340  *  being properly accounted for by the PHY and will stall preventing
   17341  *  further packets from being received.  The workaround is to clear the
   17342  *  packet buffer after the PHY detects link up.
   17343  */
   17344 static int
   17345 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17346 {
   17347 	uint16_t phyreg;
   17348 
   17349 	if (sc->sc_phytype != WMPHY_82578)
   17350 		return 0;
   17351 
    17352 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   17353 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17354 	if ((phyreg & BMCR_LOOP) != 0)
   17355 		return 0;
   17356 
   17357 	/* Check if link is up and at 1Gbps */
   17358 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17359 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17360 	    | BM_CS_STATUS_SPEED_MASK;
   17361 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17362 		| BM_CS_STATUS_SPEED_1000))
   17363 		return 0;
   17364 
   17365 	delay(200 * 1000);	/* XXX too big */
   17366 
   17367 	/* Flush the packets in the fifo buffer */
   17368 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17369 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
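          	/* Then clear FORCE_SPEED to return the mux to normal use */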
   17370 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17371 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17372 
   17373 	return 0;
   17374 }
   17375 
   17376 static int
   17377 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17378 {
   17379 	int rv;
   17380 
   17381 	rv = sc->phy.acquire(sc);
   17382 	if (rv != 0) {
   17383 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17384 		    __func__);
   17385 		return rv;
   17386 	}
   17387 
   17388 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17389 
   17390 	sc->phy.release(sc);
   17391 
   17392 	return rv;
   17393 }
   17394 
   17395 static int
   17396 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17397 {
   17398 	int rv;
   17399 	uint16_t reg;
   17400 
   17401 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17402 	if (rv != 0)
   17403 		return rv;
   17404 
   17405 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17406 	    reg | HV_KMRN_MDIO_SLOW);
   17407 }
   17408 
   17409 /*
   17410  *  wm_configure_k1_ich8lan - Configure K1 power state
   17411  *  @sc: pointer to the HW structure
   17412  *  @enable: K1 state to configure
   17413  *
   17414  *  Configure the K1 power state based on the provided parameter.
   17415  *  Assumes semaphore already acquired.
   17416  */
   17417 static void
   17418 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17419 {
   17420 	uint32_t ctrl, ctrl_ext, tmp;
   17421 	uint16_t kmreg;
   17422 	int rv;
   17423 
   17424 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17425 
   17426 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17427 	if (rv != 0)
   17428 		return;
   17429 
   17430 	if (k1_enable)
   17431 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17432 	else
   17433 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17434 
   17435 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17436 	if (rv != 0)
   17437 		return;
   17438 
   17439 	delay(20);
   17440 
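          	/*
          	 * Briefly force the MAC speed with SPD_BYPS set, then restore
          	 * the original CTRL/CTRL_EXT values; presumably this cycles
          	 * the Kumeran interface so the new K1 setting takes effect.
          	 */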
   17441 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17442 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   17443 
   17444 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17445 	tmp |= CTRL_FRCSPD;
   17446 
   17447 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17448 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17449 	CSR_WRITE_FLUSH(sc);
   17450 	delay(20);
   17451 
   17452 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17453 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17454 	CSR_WRITE_FLUSH(sc);
   17455 	delay(20);
   17456 
   17457 	return;
   17458 }
   17459 
   17460 /* special case - for 82575 - need to do manual init ... */
   17461 static void
   17462 wm_reset_init_script_82575(struct wm_softc *sc)
   17463 {
   17464 	/*
    17465 	 * Remark: this is untested code - we have no board without EEPROM.
    17466 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17467 	 */
   17468 
   17469 	/* SerDes configuration via SERDESCTRL */
   17470 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17471 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17472 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17473 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17474 
   17475 	/* CCM configuration via CCMCTL register */
   17476 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17477 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17478 
   17479 	/* PCIe lanes configuration */
   17480 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17481 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17482 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17483 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17484 
   17485 	/* PCIe PLL Configuration */
   17486 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17487 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17488 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17489 }
   17490 
   17491 static void
   17492 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17493 {
   17494 	uint32_t reg;
   17495 	uint16_t nvmword;
   17496 	int rv;
   17497 
   17498 	if (sc->sc_type != WM_T_82580)
   17499 		return;
   17500 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17501 		return;
   17502 
   17503 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17504 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17505 	if (rv != 0) {
   17506 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17507 		    __func__);
   17508 		return;
   17509 	}
   17510 
   17511 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17512 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17513 		reg |= MDICNFG_DEST;
   17514 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17515 		reg |= MDICNFG_COM_MDIO;
   17516 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17517 }
   17518 
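/* A PHY ID of all zeros or all ones means no (readable) PHY responded. */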
   17519 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17520 
   17521 static bool
   17522 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17523 {
   17524 	uint32_t reg;
   17525 	uint16_t id1, id2;
   17526 	int i, rv;
   17527 
   17528 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17529 		device_xname(sc->sc_dev), __func__));
   17530 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17531 
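	/*
	 * Try the ID registers up to twice; a hedged explanation of the
	 * retry loop below is that the first MDIO access after the PHY
	 * comes out of reset may fail or return an invalid ID.
	 */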
   17532 	id1 = id2 = 0xffff;
   17533 	for (i = 0; i < 2; i++) {
   17534 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17535 		    &id1);
   17536 		if ((rv != 0) || MII_INVALIDID(id1))
   17537 			continue;
   17538 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17539 		    &id2);
   17540 		if ((rv != 0) || MII_INVALIDID(id2))
   17541 			continue;
   17542 		break;
   17543 	}
   17544 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17545 		goto out;
   17546 
   17547 	/*
    17548 	 * In case the PHY needs to be in MDIO slow mode,
    17549 	 * set slow mode and try to read the PHY ID again.
   17550 	 */
   17551 	rv = 0;
   17552 	if (sc->sc_type < WM_T_PCH_LPT) {
   17553 		wm_set_mdio_slow_mode_hv_locked(sc);
   17554 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17555 		    &id1);
   17556 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17557 		    &id2);
   17558 	}
   17559 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    17560 		device_printf(sc->sc_dev, "XXX return with false (invalid PHY ID)\n");
   17561 		return false;
   17562 	}
   17563 out:
   17564 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17565 		/* Only unforce SMBus if ME is not active */
   17566 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17567 			uint16_t phyreg;
   17568 
   17569 			/* Unforce SMBus mode in PHY */
   17570 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17571 			    CV_SMB_CTRL, &phyreg);
   17572 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17573 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17574 			    CV_SMB_CTRL, phyreg);
   17575 
   17576 			/* Unforce SMBus mode in MAC */
   17577 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17578 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17579 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17580 		}
   17581 	}
   17582 	return true;
   17583 }
   17584 
   17585 static void
   17586 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17587 {
   17588 	uint32_t reg;
   17589 	int i;
   17590 
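	/*
	 * Toggling the LANPHYPC value pin (override asserted with the
	 * value driven low, then released) power-cycles the PHY, e.g. to
	 * bring it back under MAC control after it was forced into SMBus
	 * mode (an assumption based on the equivalent e1000e code).
	 */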
   17591 	/* Set PHY Config Counter to 50msec */
   17592 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17593 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17594 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17595 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17596 
   17597 	/* Toggle LANPHYPC */
   17598 	reg = CSR_READ(sc, WMREG_CTRL);
   17599 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17600 	reg &= ~CTRL_LANPHYPC_VALUE;
   17601 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17602 	CSR_WRITE_FLUSH(sc);
   17603 	delay(1000);
   17604 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17605 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17606 	CSR_WRITE_FLUSH(sc);
   17607 
   17608 	if (sc->sc_type < WM_T_PCH_LPT)
   17609 		delay(50 * 1000);
   17610 	else {
   17611 		i = 20;
   17612 
   17613 		do {
   17614 			delay(5 * 1000);
   17615 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17616 		    && i--);
   17617 
   17618 		delay(30 * 1000);
   17619 	}
   17620 }
   17621 
   17622 static int
   17623 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17624 {
   17625 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17626 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17627 	uint32_t rxa;
   17628 	uint16_t scale = 0, lat_enc = 0;
   17629 	int32_t obff_hwm = 0;
   17630 	int64_t lat_ns, value;
   17631 
   17632 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17633 		device_xname(sc->sc_dev), __func__));
   17634 
   17635 	if (link) {
   17636 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17637 		uint32_t status;
   17638 		uint16_t speed;
   17639 		pcireg_t preg;
   17640 
   17641 		status = CSR_READ(sc, WMREG_STATUS);
   17642 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17643 		case STATUS_SPEED_10:
   17644 			speed = 10;
   17645 			break;
   17646 		case STATUS_SPEED_100:
   17647 			speed = 100;
   17648 			break;
   17649 		case STATUS_SPEED_1000:
   17650 			speed = 1000;
   17651 			break;
   17652 		default:
   17653 			device_printf(sc->sc_dev, "Unknown speed "
   17654 			    "(status = %08x)\n", status);
   17655 			return -1;
   17656 		}
   17657 
   17658 		/* Rx Packet Buffer Allocation size (KB) */
   17659 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17660 
   17661 		/*
   17662 		 * Determine the maximum latency tolerated by the device.
   17663 		 *
   17664 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17665 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17666 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17667 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17668 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17669 		 */
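		/*
		 * Unit check for the expression below: rxa is in KB, so
		 * (rxa * 1024 - two maximum-sized frames) is bytes;
		 * "* 8" makes bits, and "* 1000 / speed (Mb/s)" converts
		 * the drain time to nanoseconds.
		 */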
   17670 		lat_ns = ((int64_t)rxa * 1024 -
   17671 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17672 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17673 		if (lat_ns < 0)
   17674 			lat_ns = 0;
   17675 		else
   17676 			lat_ns /= speed;
   17677 		value = lat_ns;
   17678 
   17679 		while (value > LTRV_VALUE) {
    17680 			scale++;
   17681 			value = howmany(value, __BIT(5));
   17682 		}
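		/*
		 * Worked example (assuming LTRV_VALUE is the 10-bit value
		 * mask, i.e. 1023): lat_ns = 100000 yields scale 2 and
		 * value 98, since ceil(ceil(100000 / 32) / 32) = 98; the
		 * encoded latency is then 98 * 2^(5*2) = 100352 ns, a
		 * representable value >= lat_ns.
		 */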
   17683 		if (scale > LTRV_SCALE_MAX) {
   17684 			device_printf(sc->sc_dev,
   17685 			    "Invalid LTR latency scale %d\n", scale);
   17686 			return -1;
   17687 		}
   17688 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17689 
   17690 		/* Determine the maximum latency tolerated by the platform */
   17691 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17692 		    WM_PCI_LTR_CAP_LPT);
   17693 		max_snoop = preg & 0xffff;
   17694 		max_nosnoop = preg >> 16;
   17695 
   17696 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17697 
   17698 		if (lat_enc > max_ltr_enc) {
   17699 			lat_enc = max_ltr_enc;
   17700 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17701 			    * PCI_LTR_SCALETONS(
   17702 				    __SHIFTOUT(lat_enc,
   17703 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17704 		}
   17705 
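		/*
		 * Convert lat_ns into the amount of data the link delivers
		 * in that time: lat_ns * speed (Mb/s) * 1000 / 8 equals
		 * bytes * 10^6, so the final division by 10^9 yields
		 * (decimal) kilobytes, comparable with rxa.  The OBFF high
		 * water mark is then the Rx buffer space left over.
		 */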
   17706 		if (lat_ns) {
   17707 			lat_ns *= speed * 1000;
   17708 			lat_ns /= 8;
   17709 			lat_ns /= 1000000000;
   17710 			obff_hwm = (int32_t)(rxa - lat_ns);
   17711 		}
   17712 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    17713 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   17714 			    "(rxa = %d, lat_ns = %d)\n",
   17715 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17716 			return -1;
   17717 		}
   17718 	}
    17719 	/* Set the snoop and no-snoop latencies to the same value */
   17720 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17721 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17722 
   17723 	/* Set OBFF high water mark */
   17724 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17725 	reg |= obff_hwm;
   17726 	CSR_WRITE(sc, WMREG_SVT, reg);
   17727 
   17728 	/* Enable OBFF */
   17729 	reg = CSR_READ(sc, WMREG_SVCR);
   17730 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17731 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17732 
   17733 	return 0;
   17734 }
   17735 
   17736 /*
   17737  * I210 Errata 25 and I211 Errata 10
   17738  * Slow System Clock.
   17739  *
    17740  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
   17741  */
   17742 static int
   17743 wm_pll_workaround_i210(struct wm_softc *sc)
   17744 {
   17745 	uint32_t mdicnfg, wuc;
   17746 	uint32_t reg;
   17747 	pcireg_t pcireg;
   17748 	uint32_t pmreg;
   17749 	uint16_t nvmword, tmp_nvmword;
   17750 	uint16_t phyval;
   17751 	bool wa_done = false;
   17752 	int i, rv = 0;
   17753 
   17754 	/* Get Power Management cap offset */
   17755 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17756 	    &pmreg, NULL) == 0)
   17757 		return -1;
   17758 
   17759 	/* Save WUC and MDICNFG registers */
   17760 	wuc = CSR_READ(sc, WMREG_WUC);
   17761 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17762 
   17763 	reg = mdicnfg & ~MDICNFG_DEST;
   17764 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17765 
   17766 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17767 		/*
   17768 		 * The default value of the Initialization Control Word 1
   17769 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17770 		 */
   17771 		nvmword = INVM_DEFAULT_AL;
   17772 	}
   17773 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17774 
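	/*
	 * A hedged summary of the loop below: check whether the PHY's PLL
	 * came up configured; if the frequency register still reads as
	 * unconfigured, reset the PHY, force a D3->D0 power-state
	 * transition with the patched autoload word in EEARBC to re-run
	 * the NVM autoload, and retry up to WM_MAX_PLL_TRIES times.
	 */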
   17775 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17776 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17777 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17778 
   17779 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17780 			rv = 0;
   17781 			break; /* OK */
   17782 		} else
   17783 			rv = -1;
   17784 
   17785 		wa_done = true;
   17786 		/* Directly reset the internal PHY */
   17787 		reg = CSR_READ(sc, WMREG_CTRL);
   17788 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17789 
   17790 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17791 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17792 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17793 
   17794 		CSR_WRITE(sc, WMREG_WUC, 0);
   17795 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17796 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17797 
   17798 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17799 		    pmreg + PCI_PMCSR);
   17800 		pcireg |= PCI_PMCSR_STATE_D3;
   17801 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17802 		    pmreg + PCI_PMCSR, pcireg);
   17803 		delay(1000);
   17804 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17805 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17806 		    pmreg + PCI_PMCSR, pcireg);
   17807 
   17808 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17809 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17810 
   17811 		/* Restore WUC register */
   17812 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17813 	}
   17814 
   17815 	/* Restore MDICNFG setting */
   17816 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17817 	if (wa_done)
   17818 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17819 	return rv;
   17820 }
   17821 
   17822 static void
   17823 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17824 {
   17825 	uint32_t reg;
   17826 
   17827 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17828 		device_xname(sc->sc_dev), __func__));
   17829 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17830 	    || (sc->sc_type == WM_T_PCH_CNP));
   17831 
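	/*
	 * Keep the side clock ungated and IOSF sideband clock gating and
	 * clock requests disabled; an assumption based on the bit names
	 * and the function's purpose is that legacy INTx interrupts can
	 * otherwise be lost on SPT/CNP.
	 */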
   17832 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17833 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17834 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17835 
   17836 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17837 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17838 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17839 }
   17840 
   17841 /* Sysctl functions */
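
/*
 * The two handlers below export the per-queue transmit descriptor head
 * (TDH) and tail (TDT) registers read-only, e.g. for debugging stuck TX
 * queues.  A hypothetical usage sketch (node names are illustrative, not
 * necessarily the driver's actual sysctl paths):
 *
 *	sysctl hw.wm0.txq00.txq_head
 *	sysctl hw.wm0.txq00.txq_tail
 */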
   17842 static int
   17843 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17844 {
   17845 	struct sysctlnode node = *rnode;
   17846 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17847 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17848 	struct wm_softc *sc = txq->txq_sc;
   17849 	uint32_t reg;
   17850 
   17851 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17852 	node.sysctl_data = &reg;
   17853 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17854 }
   17855 
   17856 static int
   17857 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17858 {
   17859 	struct sysctlnode node = *rnode;
   17860 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17861 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17862 	struct wm_softc *sc = txq->txq_sc;
   17863 	uint32_t reg;
   17864 
   17865 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17866 	node.sysctl_data = &reg;
   17867 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17868 }
   17869 
   17870 #ifdef WM_DEBUG
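/*
 * Read/write sysctl handler for the sc_debug flag word; writing new flags
 * takes effect immediately and, as a side effect, dumps TARC0 and TDT(0)
 * to the console.
 */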
   17871 static int
   17872 wm_sysctl_debug(SYSCTLFN_ARGS)
   17873 {
   17874 	struct sysctlnode node = *rnode;
   17875 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17876 	uint32_t dflags;
   17877 	int error;
   17878 
   17879 	dflags = sc->sc_debug;
   17880 	node.sysctl_data = &dflags;
   17881 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17882 
   17883 	if (error || newp == NULL)
   17884 		return error;
   17885 
   17886 	sc->sc_debug = dflags;
   17887 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17888 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17889 
   17890 	return 0;
   17891 }
   17892 #endif
   17893