Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.716
      1 /*	$NetBSD: if_wm.c,v 1.716 2021/11/04 12:25:05 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.716 2021/11/04 12:25:05 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 #include <sys/atomic.h>
    111 
    112 #include <sys/rndsource.h>
    113 
    114 #include <net/if.h>
    115 #include <net/if_dl.h>
    116 #include <net/if_media.h>
    117 #include <net/if_ether.h>
    118 
    119 #include <net/bpf.h>
    120 
    121 #include <net/rss_config.h>
    122 
    123 #include <netinet/in.h>			/* XXX for struct ip */
    124 #include <netinet/in_systm.h>		/* XXX for struct ip */
    125 #include <netinet/ip.h>			/* XXX for struct ip */
    126 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    127 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    128 
    129 #include <sys/bus.h>
    130 #include <sys/intr.h>
    131 #include <machine/endian.h>
    132 
    133 #include <dev/mii/mii.h>
    134 #include <dev/mii/mdio.h>
    135 #include <dev/mii/miivar.h>
    136 #include <dev/mii/miidevs.h>
    137 #include <dev/mii/mii_bitbang.h>
    138 #include <dev/mii/ikphyreg.h>
    139 #include <dev/mii/igphyreg.h>
    140 #include <dev/mii/igphyvar.h>
    141 #include <dev/mii/inbmphyreg.h>
    142 #include <dev/mii/ihphyreg.h>
    143 #include <dev/mii/makphyreg.h>
    144 
    145 #include <dev/pci/pcireg.h>
    146 #include <dev/pci/pcivar.h>
    147 #include <dev/pci/pcidevs.h>
    148 
    149 #include <dev/pci/if_wmreg.h>
    150 #include <dev/pci/if_wmvar.h>
    151 
    152 #ifdef WM_DEBUG
    153 #define	WM_DEBUG_LINK		__BIT(0)
    154 #define	WM_DEBUG_TX		__BIT(1)
    155 #define	WM_DEBUG_RX		__BIT(2)
    156 #define	WM_DEBUG_GMII		__BIT(3)
    157 #define	WM_DEBUG_MANAGE		__BIT(4)
    158 #define	WM_DEBUG_NVM		__BIT(5)
    159 #define	WM_DEBUG_INIT		__BIT(6)
    160 #define	WM_DEBUG_LOCK		__BIT(7)
    161 
    162 #if 0
    163 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    164 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    165 	WM_DEBUG_LOCK
    166 #endif
    167 
    168 #define	DPRINTF(sc, x, y)			  \
    169 	do {					  \
    170 		if ((sc)->sc_debug & (x))	  \
    171 			printf y;		  \
    172 	} while (0)
    173 #else
    174 #define	DPRINTF(sc, x, y)	__nothing
    175 #endif /* WM_DEBUG */
    176 
    177 #ifdef NET_MPSAFE
    178 #define WM_MPSAFE	1
    179 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    180 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    181 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    182 #else
    183 #define WM_CALLOUT_FLAGS	0
    184 #define WM_SOFTINT_FLAGS	0
    185 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    186 #endif
    187 
    188 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    189 
    190 /*
    191  * This device driver's max interrupt numbers.
    192  */
    193 #define WM_MAX_NQUEUEINTR	16
    194 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    195 
    196 #ifndef WM_DISABLE_MSI
    197 #define	WM_DISABLE_MSI 0
    198 #endif
    199 #ifndef WM_DISABLE_MSIX
    200 #define	WM_DISABLE_MSIX 0
    201 #endif
    202 
    203 int wm_disable_msi = WM_DISABLE_MSI;
    204 int wm_disable_msix = WM_DISABLE_MSIX;
    205 
    206 #ifndef WM_WATCHDOG_TIMEOUT
    207 #define WM_WATCHDOG_TIMEOUT 5
    208 #endif
    209 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    210 
    211 /*
    212  * Transmit descriptor list size.  Due to errata, we can only have
    213  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    214  * on >= 82544. We tell the upper layers that they can queue a lot
    215  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    216  * of them at a time.
    217  *
    218  * We allow up to 64 DMA segments per packet.  Pathological packet
    219  * chains containing many small mbufs have been observed in zero-copy
    220  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    221  * m_defrag() is called to reduce it.
    222  */
    223 #define	WM_NTXSEGS		64
    224 #define	WM_IFQUEUELEN		256
    225 #define	WM_TXQUEUELEN_MAX	64
    226 #define	WM_TXQUEUELEN_MAX_82547	16
    227 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    228 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    229 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    230 #define	WM_NTXDESC_82542	256
    231 #define	WM_NTXDESC_82544	4096
    232 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    233 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    234 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    235 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    236 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    237 
    238 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    239 
    240 #define	WM_TXINTERQSIZE		256
    241 
    242 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 /*
    250  * Receive descriptor list size.  We have one Rx buffer for normal
    251  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    252  * packet.  We allocate 256 receive descriptors, each with a 2k
    253  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    254  */
    255 #define	WM_NRXDESC		256U
    256 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    257 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    258 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    259 
    260 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    261 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    262 #endif
    263 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    264 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    265 #endif
    266 
    267 typedef union txdescs {
    268 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    269 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    270 } txdescs_t;
    271 
    272 typedef union rxdescs {
    273 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    274 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    275 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    276 } rxdescs_t;
    277 
    278 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    279 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    280 
    281 /*
    282  * Software state for transmit jobs.
    283  */
    284 struct wm_txsoft {
    285 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    286 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    287 	int txs_firstdesc;		/* first descriptor in packet */
    288 	int txs_lastdesc;		/* last descriptor in packet */
    289 	int txs_ndesc;			/* # of descriptors used */
    290 };
    291 
    292 /*
    293  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    294  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    295  * them together.
    296  */
    297 struct wm_rxsoft {
    298 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    299 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    300 };
    301 
    302 #define WM_LINKUP_TIMEOUT	50
    303 
    304 static uint16_t swfwphysem[] = {
    305 	SWFW_PHY0_SM,
    306 	SWFW_PHY1_SM,
    307 	SWFW_PHY2_SM,
    308 	SWFW_PHY3_SM
    309 };
    310 
    311 static const uint32_t wm_82580_rxpbs_table[] = {
    312 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    313 };
    314 
    315 struct wm_softc;
    316 
    317 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
    318 #if !defined(WM_EVENT_COUNTERS)
    319 #define WM_EVENT_COUNTERS 1
    320 #endif
    321 #endif
    322 
    323 #ifdef WM_EVENT_COUNTERS
    324 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    325 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    326 	struct evcnt qname##_ev_##evname;
    327 
    328 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    329 	do {								\
    330 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    331 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    332 		    "%s%02d%s", #qname, (qnum), #evname);		\
    333 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    334 		    (evtype), NULL, (xname),				\
    335 		    (q)->qname##_##evname##_evcnt_name);		\
    336 	} while (0)
    337 
    338 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    339 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    340 
    341 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    342 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    343 
    344 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    345 	evcnt_detach(&(q)->qname##_ev_##evname);
    346 #endif /* WM_EVENT_COUNTERS */
    347 
    348 struct wm_txqueue {
    349 	kmutex_t *txq_lock;		/* lock for tx operations */
    350 
    351 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    352 
    353 	/* Software state for the transmit descriptors. */
    354 	int txq_num;			/* must be a power of two */
    355 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    356 
    357 	/* TX control data structures. */
    358 	int txq_ndesc;			/* must be a power of two */
    359 	size_t txq_descsize;		/* a tx descriptor size */
    360 	txdescs_t *txq_descs_u;
    361 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    363 	int txq_desc_rseg;		/* real number of control segment */
    364 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    365 #define	txq_descs	txq_descs_u->sctxu_txdescs
    366 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    367 
    368 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    369 
    370 	int txq_free;			/* number of free Tx descriptors */
    371 	int txq_next;			/* next ready Tx descriptor */
    372 
    373 	int txq_sfree;			/* number of free Tx jobs */
    374 	int txq_snext;			/* next free Tx job */
    375 	int txq_sdirty;			/* dirty Tx jobs */
    376 
    377 	/* These 4 variables are used only on the 82547. */
    378 	int txq_fifo_size;		/* Tx FIFO size */
    379 	int txq_fifo_head;		/* current head of FIFO */
    380 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    381 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    382 
    383 	/*
    384 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    385 	 * CPUs. This queue intermediate them without block.
    386 	 */
    387 	pcq_t *txq_interq;
    388 
    389 	/*
    390 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    391 	 * to manage Tx H/W queue's busy flag.
    392 	 */
    393 	int txq_flags;			/* flags for H/W queue, see below */
    394 #define	WM_TXQ_NO_SPACE		0x1
    395 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    396 
    397 	bool txq_stopping;
    398 
    399 	bool txq_sending;
    400 	time_t txq_lastsent;
    401 
    402 	/* Checksum flags used for previous packet */
    403 	uint32_t	txq_last_hw_cmd;
    404 	uint8_t		txq_last_hw_fields;
    405 	uint16_t	txq_last_hw_ipcs;
    406 	uint16_t	txq_last_hw_tucs;
    407 
    408 	uint32_t txq_packets;		/* for AIM */
    409 	uint32_t txq_bytes;		/* for AIM */
    410 #ifdef WM_EVENT_COUNTERS
    411 	/* TX event counters */
    412 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    413 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    414 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    415 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    416 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    417 					    /* XXX not used? */
    418 
    419 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    420 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    421 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    422 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    423 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    424 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    425 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    426 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    427 					    /* other than toomanyseg */
    428 
    429 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    430 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    431 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    432 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
    433 
    434 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    435 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    436 #endif /* WM_EVENT_COUNTERS */
    437 };
    438 
    439 struct wm_rxqueue {
    440 	kmutex_t *rxq_lock;		/* lock for rx operations */
    441 
    442 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    443 
    444 	/* Software state for the receive descriptors. */
    445 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    446 
    447 	/* RX control data structures. */
    448 	int rxq_ndesc;			/* must be a power of two */
    449 	size_t rxq_descsize;		/* a rx descriptor size */
    450 	rxdescs_t *rxq_descs_u;
    451 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    452 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    453 	int rxq_desc_rseg;		/* real number of control segment */
    454 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    455 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    456 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    457 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    458 
    459 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    460 
    461 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    462 	int rxq_discard;
    463 	int rxq_len;
    464 	struct mbuf *rxq_head;
    465 	struct mbuf *rxq_tail;
    466 	struct mbuf **rxq_tailp;
    467 
    468 	bool rxq_stopping;
    469 
    470 	uint32_t rxq_packets;		/* for AIM */
    471 	uint32_t rxq_bytes;		/* for AIM */
    472 #ifdef WM_EVENT_COUNTERS
    473 	/* RX event counters */
    474 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    475 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    476 
    477 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    478 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    479 #endif
    480 };
    481 
    482 struct wm_queue {
    483 	int wmq_id;			/* index of TX/RX queues */
    484 	int wmq_intr_idx;		/* index of MSI-X tables */
    485 
    486 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    487 	bool wmq_set_itr;
    488 
    489 	struct wm_txqueue wmq_txq;
    490 	struct wm_rxqueue wmq_rxq;
    491 	char sysctlname[32];		/* Name for sysctl */
    492 
    493 	bool wmq_txrx_use_workqueue;
    494 	struct work wmq_cookie;
    495 	void *wmq_si;
    496 };
    497 
    498 struct wm_phyop {
    499 	int (*acquire)(struct wm_softc *);
    500 	void (*release)(struct wm_softc *);
    501 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    502 	int (*writereg_locked)(device_t, int, int, uint16_t);
    503 	int reset_delay_us;
    504 	bool no_errprint;
    505 };
    506 
    507 struct wm_nvmop {
    508 	int (*acquire)(struct wm_softc *);
    509 	void (*release)(struct wm_softc *);
    510 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    511 };
    512 
    513 /*
    514  * Software state per device.
    515  */
    516 struct wm_softc {
    517 	device_t sc_dev;		/* generic device information */
    518 	bus_space_tag_t sc_st;		/* bus space tag */
    519 	bus_space_handle_t sc_sh;	/* bus space handle */
    520 	bus_size_t sc_ss;		/* bus space size */
    521 	bus_space_tag_t sc_iot;		/* I/O space tag */
    522 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    523 	bus_size_t sc_ios;		/* I/O space size */
    524 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    525 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    526 	bus_size_t sc_flashs;		/* flash registers space size */
    527 	off_t sc_flashreg_offset;	/*
    528 					 * offset to flash registers from
    529 					 * start of BAR
    530 					 */
    531 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    532 
    533 	struct ethercom sc_ethercom;	/* ethernet common data */
    534 	struct mii_data sc_mii;		/* MII/media information */
    535 
    536 	pci_chipset_tag_t sc_pc;
    537 	pcitag_t sc_pcitag;
    538 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    539 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    540 
    541 	uint16_t sc_pcidevid;		/* PCI device ID */
    542 	wm_chip_type sc_type;		/* MAC type */
    543 	int sc_rev;			/* MAC revision */
    544 	wm_phy_type sc_phytype;		/* PHY type */
    545 	uint8_t sc_sfptype;		/* SFP type */
    546 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    547 #define	WM_MEDIATYPE_UNKNOWN		0x00
    548 #define	WM_MEDIATYPE_FIBER		0x01
    549 #define	WM_MEDIATYPE_COPPER		0x02
    550 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    551 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    552 	int sc_flags;			/* flags; see below */
    553 	u_short sc_if_flags;		/* last if_flags */
    554 	int sc_ec_capenable;		/* last ec_capenable */
    555 	int sc_flowflags;		/* 802.3x flow control flags */
    556 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    557 	int sc_align_tweak;
    558 
    559 	void *sc_ihs[WM_MAX_NINTR];	/*
    560 					 * interrupt cookie.
    561 					 * - legacy and msi use sc_ihs[0] only
    562 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    563 					 */
    564 	pci_intr_handle_t *sc_intrs;	/*
    565 					 * legacy and msi use sc_intrs[0] only
    566 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    567 					 */
    568 	int sc_nintrs;			/* number of interrupts */
    569 
    570 	int sc_link_intr_idx;		/* index of MSI-X tables */
    571 
    572 	callout_t sc_tick_ch;		/* tick callout */
    573 	bool sc_core_stopping;
    574 
    575 	int sc_nvm_ver_major;
    576 	int sc_nvm_ver_minor;
    577 	int sc_nvm_ver_build;
    578 	int sc_nvm_addrbits;		/* NVM address bits */
    579 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    580 	int sc_ich8_flash_base;
    581 	int sc_ich8_flash_bank_size;
    582 	int sc_nvm_k1_enabled;
    583 
    584 	int sc_nqueues;
    585 	struct wm_queue *sc_queue;
    586 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    587 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    588 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    589 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    590 	struct workqueue *sc_queue_wq;
    591 	bool sc_txrx_use_workqueue;
    592 
    593 	int sc_affinity_offset;
    594 
    595 #ifdef WM_EVENT_COUNTERS
    596 	/* Event counters. */
    597 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    598 
    599 	/* WM_T_82542_2_1 only */
    600 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    601 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    602 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    603 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    604 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    605 #endif /* WM_EVENT_COUNTERS */
    606 
    607 	struct sysctllog *sc_sysctllog;
    608 
    609 	/* This variable are used only on the 82547. */
    610 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    611 
    612 	uint32_t sc_ctrl;		/* prototype CTRL register */
    613 #if 0
    614 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    615 #endif
    616 	uint32_t sc_icr;		/* prototype interrupt bits */
    617 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    618 	uint32_t sc_tctl;		/* prototype TCTL register */
    619 	uint32_t sc_rctl;		/* prototype RCTL register */
    620 	uint32_t sc_txcw;		/* prototype TXCW register */
    621 	uint32_t sc_tipg;		/* prototype TIPG register */
    622 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    623 	uint32_t sc_pba;		/* prototype PBA register */
    624 
    625 	int sc_tbi_linkup;		/* TBI link status */
    626 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    627 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    628 
    629 	int sc_mchash_type;		/* multicast filter offset */
    630 
    631 	krndsource_t rnd_source;	/* random source */
    632 
    633 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    634 
    635 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    636 	kmutex_t *sc_ich_phymtx;	/*
    637 					 * 82574/82583/ICH/PCH specific PHY
    638 					 * mutex. For 82574/82583, the mutex
    639 					 * is used for both PHY and NVM.
    640 					 */
    641 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    642 
    643 	struct wm_phyop phy;
    644 	struct wm_nvmop nvm;
    645 #ifdef WM_DEBUG
    646 	uint32_t sc_debug;
    647 #endif
    648 };
    649 
    650 #define WM_CORE_LOCK(_sc)						\
    651 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    652 #define WM_CORE_UNLOCK(_sc)						\
    653 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    654 #define WM_CORE_LOCKED(_sc)						\
    655 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    656 
    657 #define	WM_RXCHAIN_RESET(rxq)						\
    658 do {									\
    659 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    660 	*(rxq)->rxq_tailp = NULL;					\
    661 	(rxq)->rxq_len = 0;						\
    662 } while (/*CONSTCOND*/0)
    663 
    664 #define	WM_RXCHAIN_LINK(rxq, m)						\
    665 do {									\
    666 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    667 	(rxq)->rxq_tailp = &(m)->m_next;				\
    668 } while (/*CONSTCOND*/0)
    669 
    670 #ifdef WM_EVENT_COUNTERS
    671 #ifdef __HAVE_ATOMIC64_LOADSTORE
    672 #define	WM_EVCNT_INCR(ev)						\
    673 	atomic_store_relaxed(&((ev)->ev_count),				\
    674 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
    675 #define	WM_EVCNT_ADD(ev, val)						\
    676 	atomic_store_relaxed(&((ev)->ev_count),				\
    677 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
    678 #else
    679 #define	WM_EVCNT_INCR(ev)						\
    680 	((ev)->ev_count)++
    681 #define	WM_EVCNT_ADD(ev, val)						\
    682 	(ev)->ev_count += (val)
    683 #endif
    684 
    685 #define WM_Q_EVCNT_INCR(qname, evname)			\
    686 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    687 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    688 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    689 #else /* !WM_EVENT_COUNTERS */
    690 #define	WM_EVCNT_INCR(ev)	/* nothing */
    691 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    692 
    693 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    694 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    695 #endif /* !WM_EVENT_COUNTERS */
    696 
    697 #define	CSR_READ(sc, reg)						\
    698 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    699 #define	CSR_WRITE(sc, reg, val)						\
    700 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    701 #define	CSR_WRITE_FLUSH(sc)						\
    702 	(void)CSR_READ((sc), WMREG_STATUS)
    703 
    704 #define ICH8_FLASH_READ32(sc, reg)					\
    705 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    706 	    (reg) + sc->sc_flashreg_offset)
    707 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    708 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    709 	    (reg) + sc->sc_flashreg_offset, (data))
    710 
    711 #define ICH8_FLASH_READ16(sc, reg)					\
    712 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    713 	    (reg) + sc->sc_flashreg_offset)
    714 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    715 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    716 	    (reg) + sc->sc_flashreg_offset, (data))
    717 
    718 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    719 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    720 
    721 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    722 #define	WM_CDTXADDR_HI(txq, x)						\
    723 	(sizeof(bus_addr_t) == 8 ?					\
    724 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    725 
    726 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    727 #define	WM_CDRXADDR_HI(rxq, x)						\
    728 	(sizeof(bus_addr_t) == 8 ?					\
    729 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    730 
    731 /*
    732  * Register read/write functions.
    733  * Other than CSR_{READ|WRITE}().
    734  */
    735 #if 0
    736 static inline uint32_t wm_io_read(struct wm_softc *, int);
    737 #endif
    738 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    739 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    740     uint32_t, uint32_t);
    741 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    742 
    743 /*
    744  * Descriptor sync/init functions.
    745  */
    746 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    747 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    748 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    749 
    750 /*
    751  * Device driver interface functions and commonly used functions.
    752  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    753  */
    754 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    755 static int	wm_match(device_t, cfdata_t, void *);
    756 static void	wm_attach(device_t, device_t, void *);
    757 static int	wm_detach(device_t, int);
    758 static bool	wm_suspend(device_t, const pmf_qual_t *);
    759 static bool	wm_resume(device_t, const pmf_qual_t *);
    760 static void	wm_watchdog(struct ifnet *);
    761 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    762     uint16_t *);
    763 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    764     uint16_t *);
    765 static void	wm_tick(void *);
    766 static int	wm_ifflags_cb(struct ethercom *);
    767 static int	wm_ioctl(struct ifnet *, u_long, void *);
    768 /* MAC address related */
    769 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    770 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    771 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    772 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    773 static int	wm_rar_count(struct wm_softc *);
    774 static void	wm_set_filter(struct wm_softc *);
    775 /* Reset and init related */
    776 static void	wm_set_vlan(struct wm_softc *);
    777 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    778 static void	wm_get_auto_rd_done(struct wm_softc *);
    779 static void	wm_lan_init_done(struct wm_softc *);
    780 static void	wm_get_cfg_done(struct wm_softc *);
    781 static int	wm_phy_post_reset(struct wm_softc *);
    782 static int	wm_write_smbus_addr(struct wm_softc *);
    783 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    784 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    785 static void	wm_initialize_hardware_bits(struct wm_softc *);
    786 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    787 static int	wm_reset_phy(struct wm_softc *);
    788 static void	wm_flush_desc_rings(struct wm_softc *);
    789 static void	wm_reset(struct wm_softc *);
    790 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    791 static void	wm_rxdrain(struct wm_rxqueue *);
    792 static void	wm_init_rss(struct wm_softc *);
    793 static void	wm_adjust_qnum(struct wm_softc *, int);
    794 static inline bool	wm_is_using_msix(struct wm_softc *);
    795 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    796 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    797 static int	wm_setup_legacy(struct wm_softc *);
    798 static int	wm_setup_msix(struct wm_softc *);
    799 static int	wm_init(struct ifnet *);
    800 static int	wm_init_locked(struct ifnet *);
    801 static void	wm_init_sysctls(struct wm_softc *);
    802 static void	wm_unset_stopping_flags(struct wm_softc *);
    803 static void	wm_set_stopping_flags(struct wm_softc *);
    804 static void	wm_stop(struct ifnet *, int);
    805 static void	wm_stop_locked(struct ifnet *, bool, bool);
    806 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    807 static void	wm_82547_txfifo_stall(void *);
    808 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    809 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    810 /* DMA related */
    811 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    812 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    813 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    814 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    815     struct wm_txqueue *);
    816 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    817 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    818 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    819     struct wm_rxqueue *);
    820 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    821 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    822 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    823 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    824 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    825 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    826 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    827     struct wm_txqueue *);
    828 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    829     struct wm_rxqueue *);
    830 static int	wm_alloc_txrx_queues(struct wm_softc *);
    831 static void	wm_free_txrx_queues(struct wm_softc *);
    832 static int	wm_init_txrx_queues(struct wm_softc *);
    833 /* Start */
    834 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    835     struct wm_txsoft *, uint32_t *, uint8_t *);
    836 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    837 static void	wm_start(struct ifnet *);
    838 static void	wm_start_locked(struct ifnet *);
    839 static int	wm_transmit(struct ifnet *, struct mbuf *);
    840 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    841 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    842 		    bool);
    843 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    844     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    845 static void	wm_nq_start(struct ifnet *);
    846 static void	wm_nq_start_locked(struct ifnet *);
    847 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    848 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    849 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    850 		    bool);
    851 static void	wm_deferred_start_locked(struct wm_txqueue *);
    852 static void	wm_handle_queue(void *);
    853 static void	wm_handle_queue_work(struct work *, void *);
    854 /* Interrupt */
    855 static bool	wm_txeof(struct wm_txqueue *, u_int);
    856 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    857 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    858 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    859 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    860 static void	wm_linkintr(struct wm_softc *, uint32_t);
    861 static int	wm_intr_legacy(void *);
    862 static inline void	wm_txrxintr_disable(struct wm_queue *);
    863 static inline void	wm_txrxintr_enable(struct wm_queue *);
    864 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    865 static int	wm_txrxintr_msix(void *);
    866 static int	wm_linkintr_msix(void *);
    867 
    868 /*
    869  * Media related.
    870  * GMII, SGMII, TBI, SERDES and SFP.
    871  */
    872 /* Common */
    873 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    874 /* GMII related */
    875 static void	wm_gmii_reset(struct wm_softc *);
    876 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    877 static int	wm_get_phy_id_82575(struct wm_softc *);
    878 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    879 static int	wm_gmii_mediachange(struct ifnet *);
    880 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    881 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    882 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    883 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    884 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    885 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    886 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    887 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    888 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    889 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    890 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    891 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    892 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    893 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    894 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    895 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    896 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    897 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    898 	bool);
    899 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    900 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    901 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    902 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    903 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    904 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    905 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    906 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    907 static void	wm_gmii_statchg(struct ifnet *);
    908 /*
    909  * kumeran related (80003, ICH* and PCH*).
    910  * These functions are not for accessing MII registers but for accessing
    911  * kumeran specific registers.
    912  */
    913 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    914 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    915 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    916 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    917 /* EMI register related */
    918 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    919 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    920 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    921 /* SGMII */
    922 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    923 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    924 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    925 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    926 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    927 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    928 /* TBI related */
    929 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    930 static void	wm_tbi_mediainit(struct wm_softc *);
    931 static int	wm_tbi_mediachange(struct ifnet *);
    932 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    933 static int	wm_check_for_link(struct wm_softc *);
    934 static void	wm_tbi_tick(struct wm_softc *);
    935 /* SERDES related */
    936 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    937 static int	wm_serdes_mediachange(struct ifnet *);
    938 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    939 static void	wm_serdes_tick(struct wm_softc *);
    940 /* SFP related */
    941 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    942 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    943 
    944 /*
    945  * NVM related.
    946  * Microwire, SPI (w/wo EERD) and Flash.
    947  */
    948 /* Misc functions */
    949 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    950 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    951 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    952 /* Microwire */
    953 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    954 /* SPI */
    955 static int	wm_nvm_ready_spi(struct wm_softc *);
    956 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    957 /* Using with EERD */
    958 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    959 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    960 /* Flash */
    961 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    962     unsigned int *);
    963 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    964 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    965 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    966     uint32_t *);
    967 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    968 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    969 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    970 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    971 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    972 /* iNVM */
    973 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    974 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    975 /* Lock, detecting NVM type, validate checksum and read */
    976 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    977 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    978 static int	wm_nvm_validate_checksum(struct wm_softc *);
    979 static void	wm_nvm_version_invm(struct wm_softc *);
    980 static void	wm_nvm_version(struct wm_softc *);
    981 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    982 
    983 /*
    984  * Hardware semaphores.
    985  * Very complexed...
    986  */
    987 static int	wm_get_null(struct wm_softc *);
    988 static void	wm_put_null(struct wm_softc *);
    989 static int	wm_get_eecd(struct wm_softc *);
    990 static void	wm_put_eecd(struct wm_softc *);
    991 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    992 static void	wm_put_swsm_semaphore(struct wm_softc *);
    993 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    994 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    995 static int	wm_get_nvm_80003(struct wm_softc *);
    996 static void	wm_put_nvm_80003(struct wm_softc *);
    997 static int	wm_get_nvm_82571(struct wm_softc *);
    998 static void	wm_put_nvm_82571(struct wm_softc *);
    999 static int	wm_get_phy_82575(struct wm_softc *);
   1000 static void	wm_put_phy_82575(struct wm_softc *);
   1001 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
   1002 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
   1003 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
   1004 static void	wm_put_swflag_ich8lan(struct wm_softc *);
   1005 static int	wm_get_nvm_ich8lan(struct wm_softc *);
   1006 static void	wm_put_nvm_ich8lan(struct wm_softc *);
   1007 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
   1008 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
   1009 
   1010 /*
   1011  * Management mode and power management related subroutines.
   1012  * BMC, AMT, suspend/resume and EEE.
   1013  */
   1014 #if 0
   1015 static int	wm_check_mng_mode(struct wm_softc *);
   1016 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
   1017 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1018 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1019 #endif
   1020 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1021 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1022 static void	wm_get_hw_control(struct wm_softc *);
   1023 static void	wm_release_hw_control(struct wm_softc *);
   1024 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1025 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1026 static void	wm_init_manageability(struct wm_softc *);
   1027 static void	wm_release_manageability(struct wm_softc *);
   1028 static void	wm_get_wakeup(struct wm_softc *);
   1029 static int	wm_ulp_disable(struct wm_softc *);
   1030 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1031 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1032 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1033 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1034 static void	wm_enable_wakeup(struct wm_softc *);
   1035 static void	wm_disable_aspm(struct wm_softc *);
   1036 /* LPLU (Low Power Link Up) */
   1037 static void	wm_lplu_d0_disable(struct wm_softc *);
   1038 /* EEE */
   1039 static int	wm_set_eee_i350(struct wm_softc *);
   1040 static int	wm_set_eee_pchlan(struct wm_softc *);
   1041 static int	wm_set_eee(struct wm_softc *);
   1042 
   1043 /*
   1044  * Workarounds (mainly PHY related).
   1045  * Basically, PHY's workarounds are in the PHY drivers.
   1046  */
   1047 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1048 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1049 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1050 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1051 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1052 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1053 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1054 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1055 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1056 static int	wm_k1_workaround_lv(struct wm_softc *);
   1057 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1058 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1059 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1060 static void	wm_reset_init_script_82575(struct wm_softc *);
   1061 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1062 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1063 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1064 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1065 static int	wm_pll_workaround_i210(struct wm_softc *);
   1066 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1067 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1068 static void	wm_set_linkdown_discard(struct wm_softc *);
   1069 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1070 
   1071 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
   1072 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
   1073 #ifdef WM_DEBUG
   1074 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1075 #endif
   1076 
   1077 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1078     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1079 
   1080 /*
   1081  * Devices supported by this driver.
   1082  */
   1083 static const struct wm_product {
   1084 	pci_vendor_id_t		wmp_vendor;
   1085 	pci_product_id_t	wmp_product;
   1086 	const char		*wmp_name;
   1087 	wm_chip_type		wmp_type;
   1088 	uint32_t		wmp_flags;
   1089 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1090 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1091 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1092 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1093 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1094 } wm_products[] = {
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1096 	  "Intel i82542 1000BASE-X Ethernet",
   1097 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1100 	  "Intel i82543GC 1000BASE-X Ethernet",
   1101 	  WM_T_82543,		WMP_F_FIBER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1104 	  "Intel i82543GC 1000BASE-T Ethernet",
   1105 	  WM_T_82543,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1108 	  "Intel i82544EI 1000BASE-T Ethernet",
   1109 	  WM_T_82544,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1112 	  "Intel i82544EI 1000BASE-X Ethernet",
   1113 	  WM_T_82544,		WMP_F_FIBER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1116 	  "Intel i82544GC 1000BASE-T Ethernet",
   1117 	  WM_T_82544,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1120 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1121 	  WM_T_82544,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1124 	  "Intel i82540EM 1000BASE-T Ethernet",
   1125 	  WM_T_82540,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1128 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1129 	  WM_T_82540,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1132 	  "Intel i82540EP 1000BASE-T Ethernet",
   1133 	  WM_T_82540,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1136 	  "Intel i82540EP 1000BASE-T Ethernet",
   1137 	  WM_T_82540,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1140 	  "Intel i82540EP 1000BASE-T Ethernet",
   1141 	  WM_T_82540,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1144 	  "Intel i82545EM 1000BASE-T Ethernet",
   1145 	  WM_T_82545,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1148 	  "Intel i82545GM 1000BASE-T Ethernet",
   1149 	  WM_T_82545_3,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1152 	  "Intel i82545GM 1000BASE-X Ethernet",
   1153 	  WM_T_82545_3,		WMP_F_FIBER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1156 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1157 	  WM_T_82545_3,		WMP_F_SERDES },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1160 	  "Intel i82546EB 1000BASE-T Ethernet",
   1161 	  WM_T_82546,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1164 	  "Intel i82546EB 1000BASE-T Ethernet",
   1165 	  WM_T_82546,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1168 	  "Intel i82545EM 1000BASE-X Ethernet",
   1169 	  WM_T_82545,		WMP_F_FIBER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1172 	  "Intel i82546EB 1000BASE-X Ethernet",
   1173 	  WM_T_82546,		WMP_F_FIBER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1176 	  "Intel i82546GB 1000BASE-T Ethernet",
   1177 	  WM_T_82546_3,		WMP_F_COPPER },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1180 	  "Intel i82546GB 1000BASE-X Ethernet",
   1181 	  WM_T_82546_3,		WMP_F_FIBER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1184 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1185 	  WM_T_82546_3,		WMP_F_SERDES },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1188 	  "i82546GB quad-port Gigabit Ethernet",
   1189 	  WM_T_82546_3,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1192 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1193 	  WM_T_82546_3,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1196 	  "Intel PRO/1000MT (82546GB)",
   1197 	  WM_T_82546_3,		WMP_F_COPPER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1200 	  "Intel i82541EI 1000BASE-T Ethernet",
   1201 	  WM_T_82541,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1204 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1205 	  WM_T_82541,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1208 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1209 	  WM_T_82541,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1212 	  "Intel i82541ER 1000BASE-T Ethernet",
   1213 	  WM_T_82541_2,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1216 	  "Intel i82541GI 1000BASE-T Ethernet",
   1217 	  WM_T_82541_2,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1220 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1221 	  WM_T_82541_2,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1224 	  "Intel i82541PI 1000BASE-T Ethernet",
   1225 	  WM_T_82541_2,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1228 	  "Intel i82547EI 1000BASE-T Ethernet",
   1229 	  WM_T_82547,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1232 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1233 	  WM_T_82547,		WMP_F_COPPER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1236 	  "Intel i82547GI 1000BASE-T Ethernet",
   1237 	  WM_T_82547_2,		WMP_F_COPPER },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1240 	  "Intel PRO/1000 PT (82571EB)",
   1241 	  WM_T_82571,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1244 	  "Intel PRO/1000 PF (82571EB)",
   1245 	  WM_T_82571,		WMP_F_FIBER },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1248 	  "Intel PRO/1000 PB (82571EB)",
   1249 	  WM_T_82571,		WMP_F_SERDES },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1252 	  "Intel PRO/1000 QT (82571EB)",
   1253 	  WM_T_82571,		WMP_F_COPPER },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1256 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1257 	  WM_T_82571,		WMP_F_COPPER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1260 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1261 	  WM_T_82571,		WMP_F_COPPER },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1264 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1265 	  WM_T_82571,		WMP_F_SERDES },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1268 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1269 	  WM_T_82571,		WMP_F_SERDES },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1272 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1273 	  WM_T_82571,		WMP_F_FIBER },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1276 	  "Intel i82572EI 1000baseT Ethernet",
   1277 	  WM_T_82572,		WMP_F_COPPER },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1280 	  "Intel i82572EI 1000baseX Ethernet",
   1281 	  WM_T_82572,		WMP_F_FIBER },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1284 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1285 	  WM_T_82572,		WMP_F_SERDES },
   1286 
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1288 	  "Intel i82572EI 1000baseT Ethernet",
   1289 	  WM_T_82572,		WMP_F_COPPER },
   1290 
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1292 	  "Intel i82573E",
   1293 	  WM_T_82573,		WMP_F_COPPER },
   1294 
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1296 	  "Intel i82573E IAMT",
   1297 	  WM_T_82573,		WMP_F_COPPER },
   1298 
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1300 	  "Intel i82573L Gigabit Ethernet",
   1301 	  WM_T_82573,		WMP_F_COPPER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1304 	  "Intel i82574L",
   1305 	  WM_T_82574,		WMP_F_COPPER },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1308 	  "Intel i82574L",
   1309 	  WM_T_82574,		WMP_F_COPPER },
   1310 
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1312 	  "Intel i82583V",
   1313 	  WM_T_82583,		WMP_F_COPPER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1316 	  "i80003 dual 1000baseT Ethernet",
   1317 	  WM_T_80003,		WMP_F_COPPER },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1320 	  "i80003 dual 1000baseX Ethernet",
   1321 	  WM_T_80003,		WMP_F_COPPER },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1324 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1325 	  WM_T_80003,		WMP_F_SERDES },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1328 	  "Intel i80003 1000baseT Ethernet",
   1329 	  WM_T_80003,		WMP_F_COPPER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1332 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1333 	  WM_T_80003,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1336 	  "Intel i82801H (M_AMT) LAN Controller",
   1337 	  WM_T_ICH8,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1339 	  "Intel i82801H (AMT) LAN Controller",
   1340 	  WM_T_ICH8,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1342 	  "Intel i82801H LAN Controller",
   1343 	  WM_T_ICH8,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1345 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1346 	  WM_T_ICH8,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1348 	  "Intel i82801H (M) LAN Controller",
   1349 	  WM_T_ICH8,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1351 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1352 	  WM_T_ICH8,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1354 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1355 	  WM_T_ICH8,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1357 	  "82567V-3 LAN Controller",
   1358 	  WM_T_ICH8,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1360 	  "82801I (AMT) LAN Controller",
   1361 	  WM_T_ICH9,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1363 	  "82801I 10/100 LAN Controller",
   1364 	  WM_T_ICH9,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1366 	  "82801I (G) 10/100 LAN Controller",
   1367 	  WM_T_ICH9,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1369 	  "82801I (GT) 10/100 LAN Controller",
   1370 	  WM_T_ICH9,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1372 	  "82801I (C) LAN Controller",
   1373 	  WM_T_ICH9,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1375 	  "82801I mobile LAN Controller",
   1376 	  WM_T_ICH9,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1378 	  "82801I mobile (V) LAN Controller",
   1379 	  WM_T_ICH9,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1381 	  "82801I mobile (AMT) LAN Controller",
   1382 	  WM_T_ICH9,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1384 	  "82567LM-4 LAN Controller",
   1385 	  WM_T_ICH9,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1387 	  "82567LM-2 LAN Controller",
   1388 	  WM_T_ICH10,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1390 	  "82567LF-2 LAN Controller",
   1391 	  WM_T_ICH10,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1393 	  "82567LM-3 LAN Controller",
   1394 	  WM_T_ICH10,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1396 	  "82567LF-3 LAN Controller",
   1397 	  WM_T_ICH10,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1399 	  "82567V-2 LAN Controller",
   1400 	  WM_T_ICH10,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1402 	  "82567V-3? LAN Controller",
   1403 	  WM_T_ICH10,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1405 	  "HANKSVILLE LAN Controller",
   1406 	  WM_T_ICH10,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1408 	  "PCH LAN (82577LM) Controller",
   1409 	  WM_T_PCH,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1411 	  "PCH LAN (82577LC) Controller",
   1412 	  WM_T_PCH,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1414 	  "PCH LAN (82578DM) Controller",
   1415 	  WM_T_PCH,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1417 	  "PCH LAN (82578DC) Controller",
   1418 	  WM_T_PCH,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1420 	  "PCH2 LAN (82579LM) Controller",
   1421 	  WM_T_PCH2,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1423 	  "PCH2 LAN (82579V) Controller",
   1424 	  WM_T_PCH2,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1426 	  "82575EB dual-1000baseT Ethernet",
   1427 	  WM_T_82575,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1429 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1430 	  WM_T_82575,		WMP_F_SERDES },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1432 	  "82575GB quad-1000baseT Ethernet",
   1433 	  WM_T_82575,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1435 	  "82575GB quad-1000baseT Ethernet (PM)",
   1436 	  WM_T_82575,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1438 	  "82576 1000BaseT Ethernet",
   1439 	  WM_T_82576,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1441 	  "82576 1000BaseX Ethernet",
   1442 	  WM_T_82576,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1445 	  "82576 gigabit Ethernet (SERDES)",
   1446 	  WM_T_82576,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1449 	  "82576 quad-1000BaseT Ethernet",
   1450 	  WM_T_82576,		WMP_F_COPPER },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1453 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1454 	  WM_T_82576,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1457 	  "82576 gigabit Ethernet",
   1458 	  WM_T_82576,		WMP_F_COPPER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1461 	  "82576 gigabit Ethernet (SERDES)",
   1462 	  WM_T_82576,		WMP_F_SERDES },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1464 	  "82576 quad-gigabit Ethernet (SERDES)",
   1465 	  WM_T_82576,		WMP_F_SERDES },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1468 	  "82580 1000BaseT Ethernet",
   1469 	  WM_T_82580,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1471 	  "82580 1000BaseX Ethernet",
   1472 	  WM_T_82580,		WMP_F_FIBER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1475 	  "82580 1000BaseT Ethernet (SERDES)",
   1476 	  WM_T_82580,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1479 	  "82580 gigabit Ethernet (SGMII)",
   1480 	  WM_T_82580,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1482 	  "82580 dual-1000BaseT Ethernet",
   1483 	  WM_T_82580,		WMP_F_COPPER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1486 	  "82580 quad-1000BaseX Ethernet",
   1487 	  WM_T_82580,		WMP_F_FIBER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1490 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1491 	  WM_T_82580,		WMP_F_COPPER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1494 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1495 	  WM_T_82580,		WMP_F_SERDES },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1498 	  "DH89XXCC 1000BASE-KX Ethernet",
   1499 	  WM_T_82580,		WMP_F_SERDES },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1502 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1503 	  WM_T_82580,		WMP_F_SERDES },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1506 	  "I350 Gigabit Network Connection",
   1507 	  WM_T_I350,		WMP_F_COPPER },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1510 	  "I350 Gigabit Fiber Network Connection",
   1511 	  WM_T_I350,		WMP_F_FIBER },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1514 	  "I350 Gigabit Backplane Connection",
   1515 	  WM_T_I350,		WMP_F_SERDES },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1518 	  "I350 Quad Port Gigabit Ethernet",
   1519 	  WM_T_I350,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1522 	  "I350 Gigabit Connection",
   1523 	  WM_T_I350,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1526 	  "I354 Gigabit Ethernet (KX)",
   1527 	  WM_T_I354,		WMP_F_SERDES },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1530 	  "I354 Gigabit Ethernet (SGMII)",
   1531 	  WM_T_I354,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1534 	  "I354 Gigabit Ethernet (2.5G)",
   1535 	  WM_T_I354,		WMP_F_COPPER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1538 	  "I210-T1 Ethernet Server Adapter",
   1539 	  WM_T_I210,		WMP_F_COPPER },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1542 	  "I210 Ethernet (Copper OEM)",
   1543 	  WM_T_I210,		WMP_F_COPPER },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1546 	  "I210 Ethernet (Copper IT)",
   1547 	  WM_T_I210,		WMP_F_COPPER },
   1548 
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1550 	  "I210 Ethernet (Copper, FLASH less)",
   1551 	  WM_T_I210,		WMP_F_COPPER },
   1552 
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1554 	  "I210 Gigabit Ethernet (Fiber)",
   1555 	  WM_T_I210,		WMP_F_FIBER },
   1556 
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1558 	  "I210 Gigabit Ethernet (SERDES)",
   1559 	  WM_T_I210,		WMP_F_SERDES },
   1560 
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1562 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1563 	  WM_T_I210,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1566 	  "I210 Gigabit Ethernet (SGMII)",
   1567 	  WM_T_I210,		WMP_F_COPPER },
   1568 
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1570 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1571 	  WM_T_I210,		WMP_F_COPPER },
   1572 
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1574 	  "I211 Ethernet (COPPER)",
   1575 	  WM_T_I211,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1577 	  "I217 V Ethernet Connection",
   1578 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1580 	  "I217 LM Ethernet Connection",
   1581 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1583 	  "I218 V Ethernet Connection",
   1584 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1586 	  "I218 V Ethernet Connection",
   1587 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1589 	  "I218 V Ethernet Connection",
   1590 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1592 	  "I218 LM Ethernet Connection",
   1593 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1595 	  "I218 LM Ethernet Connection",
   1596 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1598 	  "I218 LM Ethernet Connection",
   1599 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1601 	  "I219 LM Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1604 	  "I219 LM (2) Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1607 	  "I219 LM (3) Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1610 	  "I219 LM (4) Ethernet Connection",
   1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1613 	  "I219 LM (5) Ethernet Connection",
   1614 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1616 	  "I219 LM (6) Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1619 	  "I219 LM (7) Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1622 	  "I219 LM (8) Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1625 	  "I219 LM (9) Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1628 	  "I219 LM (10) Ethernet Connection",
   1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1631 	  "I219 LM (11) Ethernet Connection",
   1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1634 	  "I219 LM (12) Ethernet Connection",
   1635 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1637 	  "I219 LM (13) Ethernet Connection",
   1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1640 	  "I219 LM (14) Ethernet Connection",
   1641 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1643 	  "I219 LM (15) Ethernet Connection",
   1644 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1646 	  "I219 LM (16) Ethernet Connection",
   1647 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1649 	  "I219 LM (17) Ethernet Connection",
   1650 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1652 	  "I219 LM (18) Ethernet Connection",
   1653 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1655 	  "I219 LM (19) Ethernet Connection",
   1656 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1658 	  "I219 V Ethernet Connection",
   1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1661 	  "I219 V (2) Ethernet Connection",
   1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1664 	  "I219 V (4) Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1667 	  "I219 V (5) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1670 	  "I219 V (6) Ethernet Connection",
   1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1673 	  "I219 V (7) Ethernet Connection",
   1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1676 	  "I219 V (8) Ethernet Connection",
   1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1679 	  "I219 V (9) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1682 	  "I219 V (10) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1685 	  "I219 V (11) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1688 	  "I219 V (12) Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1691 	  "I219 V (13) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1694 	  "I219 V (14) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1697 	  "I219 V (15) Ethernet Connection",
   1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1700 	  "I219 V (16) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1703 	  "I219 V (17) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1706 	  "I219 V (18) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1709 	  "I219 V (19) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ 0,			0,
   1712 	  NULL,
   1713 	  0,			0 },
   1714 };
   1715 
   1716 /*
   1717  * Register read/write functions.
   1718  * Other than CSR_{READ|WRITE}().
   1719  */
   1720 
   1721 #if 0 /* Not currently used */
   1722 static inline uint32_t
   1723 wm_io_read(struct wm_softc *sc, int reg)
   1724 {
   1725 
   1726 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1727 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1728 }
   1729 #endif
   1730 
   1731 static inline void
   1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1733 {
   1734 
   1735 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1736 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1737 }
   1738 
   1739 static inline void
   1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1741     uint32_t data)
   1742 {
   1743 	uint32_t regval;
   1744 	int i;
   1745 
   1746 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1747 
   1748 	CSR_WRITE(sc, reg, regval);
   1749 
   1750 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1751 		delay(5);
   1752 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1753 			break;
   1754 	}
   1755 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1756 		aprint_error("%s: WARNING:"
   1757 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1758 		    device_xname(sc->sc_dev), reg);
   1759 	}
   1760 }
   1761 
   1762 static inline void
   1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1764 {
   1765 	wa->wa_low = htole32(v & 0xffffffffU);
   1766 	if (sizeof(bus_addr_t) == 8)
   1767 		wa->wa_high = htole32((uint64_t) v >> 32);
   1768 	else
   1769 		wa->wa_high = 0;
   1770 }
   1771 
   1772 /*
   1773  * Descriptor sync/init functions.
   1774  */
   1775 static inline void
   1776 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1777 {
   1778 	struct wm_softc *sc = txq->txq_sc;
   1779 
   1780 	/* If it will wrap around, sync to the end of the ring. */
   1781 	if ((start + num) > WM_NTXDESC(txq)) {
   1782 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1783 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1784 		    (WM_NTXDESC(txq) - start), ops);
   1785 		num -= (WM_NTXDESC(txq) - start);
   1786 		start = 0;
   1787 	}
   1788 
   1789 	/* Now sync whatever is left. */
   1790 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1791 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1792 }
   1793 
   1794 static inline void
   1795 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1796 {
   1797 	struct wm_softc *sc = rxq->rxq_sc;
   1798 
   1799 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1800 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1801 }
   1802 
   1803 static inline void
   1804 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1805 {
   1806 	struct wm_softc *sc = rxq->rxq_sc;
   1807 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1808 	struct mbuf *m = rxs->rxs_mbuf;
   1809 
   1810 	/*
   1811 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1812 	 * so that the payload after the Ethernet header is aligned
   1813 	 * to a 4-byte boundary.
   1814 
   1815 	 * XXX BRAINDAMAGE ALERT!
   1816 	 * The stupid chip uses the same size for every buffer, which
   1817 	 * is set in the Receive Control register.  We are using the 2K
   1818 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1819 	 * reason, we can't "scoot" packets longer than the standard
   1820 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1821 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1822 	 * the upper layer copy the headers.
   1823 	 */
   1824 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1825 
   1826 	if (sc->sc_type == WM_T_82574) {
   1827 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1828 		rxd->erx_data.erxd_addr =
   1829 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1830 		rxd->erx_data.erxd_dd = 0;
   1831 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1832 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1833 
   1834 		rxd->nqrx_data.nrxd_paddr =
   1835 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1836 		/* Currently, split header is not supported. */
   1837 		rxd->nqrx_data.nrxd_haddr = 0;
   1838 	} else {
   1839 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1840 
   1841 		wm_set_dma_addr(&rxd->wrx_addr,
   1842 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1843 		rxd->wrx_len = 0;
   1844 		rxd->wrx_cksum = 0;
   1845 		rxd->wrx_status = 0;
   1846 		rxd->wrx_errors = 0;
   1847 		rxd->wrx_special = 0;
   1848 	}
   1849 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1850 
   1851 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1852 }
   1853 
   1854 /*
   1855  * Device driver interface functions and commonly used functions.
   1856  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1857  */
   1858 
   1859 /* Lookup supported device table */
   1860 static const struct wm_product *
   1861 wm_lookup(const struct pci_attach_args *pa)
   1862 {
   1863 	const struct wm_product *wmp;
   1864 
   1865 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1866 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1867 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1868 			return wmp;
   1869 	}
   1870 	return NULL;
   1871 }
   1872 
   1873 /* The match function (ca_match) */
   1874 static int
   1875 wm_match(device_t parent, cfdata_t cf, void *aux)
   1876 {
   1877 	struct pci_attach_args *pa = aux;
   1878 
   1879 	if (wm_lookup(pa) != NULL)
   1880 		return 1;
   1881 
   1882 	return 0;
   1883 }
   1884 
   1885 /* The attach function (ca_attach) */
   1886 static void
   1887 wm_attach(device_t parent, device_t self, void *aux)
   1888 {
   1889 	struct wm_softc *sc = device_private(self);
   1890 	struct pci_attach_args *pa = aux;
   1891 	prop_dictionary_t dict;
   1892 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1893 	pci_chipset_tag_t pc = pa->pa_pc;
   1894 	int counts[PCI_INTR_TYPE_SIZE];
   1895 	pci_intr_type_t max_type;
   1896 	const char *eetype, *xname;
   1897 	bus_space_tag_t memt;
   1898 	bus_space_handle_t memh;
   1899 	bus_size_t memsize;
   1900 	int memh_valid;
   1901 	int i, error;
   1902 	const struct wm_product *wmp;
   1903 	prop_data_t ea;
   1904 	prop_number_t pn;
   1905 	uint8_t enaddr[ETHER_ADDR_LEN];
   1906 	char buf[256];
   1907 	char wqname[MAXCOMLEN];
   1908 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1909 	pcireg_t preg, memtype;
   1910 	uint16_t eeprom_data, apme_mask;
   1911 	bool force_clear_smbi;
   1912 	uint32_t link_mode;
   1913 	uint32_t reg;
   1914 
   1915 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1916 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1917 #endif
   1918 	sc->sc_dev = self;
   1919 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1920 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1921 	sc->sc_core_stopping = false;
   1922 
   1923 	wmp = wm_lookup(pa);
   1924 #ifdef DIAGNOSTIC
   1925 	if (wmp == NULL) {
   1926 		printf("\n");
   1927 		panic("wm_attach: impossible");
   1928 	}
   1929 #endif
   1930 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1931 
   1932 	sc->sc_pc = pa->pa_pc;
   1933 	sc->sc_pcitag = pa->pa_tag;
   1934 
   1935 	if (pci_dma64_available(pa))
   1936 		sc->sc_dmat = pa->pa_dmat64;
   1937 	else
   1938 		sc->sc_dmat = pa->pa_dmat;
   1939 
   1940 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1941 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1942 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1943 
   1944 	sc->sc_type = wmp->wmp_type;
   1945 
   1946 	/* Set default function pointers */
   1947 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1948 	sc->phy.release = sc->nvm.release = wm_put_null;
   1949 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1950 
   1951 	if (sc->sc_type < WM_T_82543) {
   1952 		if (sc->sc_rev < 2) {
   1953 			aprint_error_dev(sc->sc_dev,
   1954 			    "i82542 must be at least rev. 2\n");
   1955 			return;
   1956 		}
   1957 		if (sc->sc_rev < 3)
   1958 			sc->sc_type = WM_T_82542_2_0;
   1959 	}
   1960 
   1961 	/*
   1962 	 * Disable MSI for Errata:
   1963 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1964 	 *
   1965 	 *  82544: Errata 25
   1966 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1967 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1968 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1969 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1970 	 *
   1971 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1972 	 *
   1973 	 *  82571 & 82572: Errata 63
   1974 	 */
   1975 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1976 	    || (sc->sc_type == WM_T_82572))
   1977 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1978 
   1979 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1980 	    || (sc->sc_type == WM_T_82580)
   1981 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1982 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1983 		sc->sc_flags |= WM_F_NEWQUEUE;
   1984 
   1985 	/* Set device properties (mactype) */
   1986 	dict = device_properties(sc->sc_dev);
   1987 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1988 
   1989 	/*
   1990 	 * Map the device.  All devices support memory-mapped acccess,
   1991 	 * and it is really required for normal operation.
   1992 	 */
   1993 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1994 	switch (memtype) {
   1995 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1996 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1997 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1998 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1999 		break;
   2000 	default:
   2001 		memh_valid = 0;
   2002 		break;
   2003 	}
   2004 
   2005 	if (memh_valid) {
   2006 		sc->sc_st = memt;
   2007 		sc->sc_sh = memh;
   2008 		sc->sc_ss = memsize;
   2009 	} else {
   2010 		aprint_error_dev(sc->sc_dev,
   2011 		    "unable to map device registers\n");
   2012 		return;
   2013 	}
   2014 
   2015 	/*
   2016 	 * In addition, i82544 and later support I/O mapped indirect
   2017 	 * register access.  It is not desirable (nor supported in
   2018 	 * this driver) to use it for normal operation, though it is
   2019 	 * required to work around bugs in some chip versions.
   2020 	 */
   2021 	switch (sc->sc_type) {
   2022 	case WM_T_82544:
   2023 	case WM_T_82541:
   2024 	case WM_T_82541_2:
   2025 	case WM_T_82547:
   2026 	case WM_T_82547_2:
   2027 		/* First we have to find the I/O BAR. */
   2028 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2029 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2030 			if (memtype == PCI_MAPREG_TYPE_IO)
   2031 				break;
   2032 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2033 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2034 				i += 4;	/* skip high bits, too */
   2035 		}
   2036 		if (i < PCI_MAPREG_END) {
   2037 			/*
   2038 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   2039 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   2040 			 * It's no problem because newer chips has no this
   2041 			 * bug.
   2042 			 *
   2043 			 * The i8254x doesn't apparently respond when the
   2044 			 * I/O BAR is 0, which looks somewhat like it's not
   2045 			 * been configured.
   2046 			 */
   2047 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2048 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2049 				aprint_error_dev(sc->sc_dev,
   2050 				    "WARNING: I/O BAR at zero.\n");
   2051 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2052 					0, &sc->sc_iot, &sc->sc_ioh,
   2053 					NULL, &sc->sc_ios) == 0) {
   2054 				sc->sc_flags |= WM_F_IOH_VALID;
   2055 			} else
   2056 				aprint_error_dev(sc->sc_dev,
   2057 				    "WARNING: unable to map I/O space\n");
   2058 		}
   2059 		break;
   2060 	default:
   2061 		break;
   2062 	}
   2063 
   2064 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2065 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2066 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2067 	if (sc->sc_type < WM_T_82542_2_1)
   2068 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2069 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2070 
   2071 	/* Power up chip */
   2072 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2073 	    && error != EOPNOTSUPP) {
   2074 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2075 		return;
   2076 	}
   2077 
   2078 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2079 	/*
   2080 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   2081 	 * resource.
   2082 	 */
   2083 	if (sc->sc_nqueues > 1) {
   2084 		max_type = PCI_INTR_TYPE_MSIX;
   2085 		/*
   2086 		 *  82583 has a MSI-X capability in the PCI configuration space
   2087 		 * but it doesn't support it. At least the document doesn't
   2088 		 * say anything about MSI-X.
   2089 		 */
   2090 		counts[PCI_INTR_TYPE_MSIX]
   2091 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2092 	} else {
   2093 		max_type = PCI_INTR_TYPE_MSI;
   2094 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2095 	}
   2096 
   2097 	/* Allocation settings */
   2098 	counts[PCI_INTR_TYPE_MSI] = 1;
   2099 	counts[PCI_INTR_TYPE_INTX] = 1;
   2100 	/* overridden by disable flags */
   2101 	if (wm_disable_msi != 0) {
   2102 		counts[PCI_INTR_TYPE_MSI] = 0;
   2103 		if (wm_disable_msix != 0) {
   2104 			max_type = PCI_INTR_TYPE_INTX;
   2105 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2106 		}
   2107 	} else if (wm_disable_msix != 0) {
   2108 		max_type = PCI_INTR_TYPE_MSI;
   2109 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2110 	}
   2111 
   2112 alloc_retry:
   2113 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2114 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2115 		return;
   2116 	}
   2117 
   2118 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2119 		error = wm_setup_msix(sc);
   2120 		if (error) {
   2121 			pci_intr_release(pc, sc->sc_intrs,
   2122 			    counts[PCI_INTR_TYPE_MSIX]);
   2123 
   2124 			/* Setup for MSI: Disable MSI-X */
   2125 			max_type = PCI_INTR_TYPE_MSI;
   2126 			counts[PCI_INTR_TYPE_MSI] = 1;
   2127 			counts[PCI_INTR_TYPE_INTX] = 1;
   2128 			goto alloc_retry;
   2129 		}
   2130 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2131 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2132 		error = wm_setup_legacy(sc);
   2133 		if (error) {
   2134 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2135 			    counts[PCI_INTR_TYPE_MSI]);
   2136 
   2137 			/* The next try is for INTx: Disable MSI */
   2138 			max_type = PCI_INTR_TYPE_INTX;
   2139 			counts[PCI_INTR_TYPE_INTX] = 1;
   2140 			goto alloc_retry;
   2141 		}
   2142 	} else {
   2143 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2144 		error = wm_setup_legacy(sc);
   2145 		if (error) {
   2146 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2147 			    counts[PCI_INTR_TYPE_INTX]);
   2148 			return;
   2149 		}
   2150 	}
   2151 
   2152 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2153 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2154 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2155 	    WM_WORKQUEUE_FLAGS);
   2156 	if (error) {
   2157 		aprint_error_dev(sc->sc_dev,
   2158 		    "unable to create workqueue\n");
   2159 		goto out;
   2160 	}
   2161 
   2162 	/*
   2163 	 * Check the function ID (unit number of the chip).
   2164 	 */
   2165 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2166 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2167 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2168 	    || (sc->sc_type == WM_T_82580)
   2169 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2170 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2171 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2172 	else
   2173 		sc->sc_funcid = 0;
   2174 
   2175 	/*
   2176 	 * Determine a few things about the bus we're connected to.
   2177 	 */
   2178 	if (sc->sc_type < WM_T_82543) {
   2179 		/* We don't really know the bus characteristics here. */
   2180 		sc->sc_bus_speed = 33;
   2181 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2182 		/*
   2183 		 * CSA (Communication Streaming Architecture) is about as fast
   2184 		 * a 32-bit 66MHz PCI Bus.
   2185 		 */
   2186 		sc->sc_flags |= WM_F_CSA;
   2187 		sc->sc_bus_speed = 66;
   2188 		aprint_verbose_dev(sc->sc_dev,
   2189 		    "Communication Streaming Architecture\n");
   2190 		if (sc->sc_type == WM_T_82547) {
   2191 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2192 			callout_setfunc(&sc->sc_txfifo_ch,
   2193 			    wm_82547_txfifo_stall, sc);
   2194 			aprint_verbose_dev(sc->sc_dev,
   2195 			    "using 82547 Tx FIFO stall work-around\n");
   2196 		}
   2197 	} else if (sc->sc_type >= WM_T_82571) {
   2198 		sc->sc_flags |= WM_F_PCIE;
   2199 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2200 		    && (sc->sc_type != WM_T_ICH10)
   2201 		    && (sc->sc_type != WM_T_PCH)
   2202 		    && (sc->sc_type != WM_T_PCH2)
   2203 		    && (sc->sc_type != WM_T_PCH_LPT)
   2204 		    && (sc->sc_type != WM_T_PCH_SPT)
   2205 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2206 			/* ICH* and PCH* have no PCIe capability registers */
   2207 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2208 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2209 				NULL) == 0)
   2210 				aprint_error_dev(sc->sc_dev,
   2211 				    "unable to find PCIe capability\n");
   2212 		}
   2213 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2214 	} else {
   2215 		reg = CSR_READ(sc, WMREG_STATUS);
   2216 		if (reg & STATUS_BUS64)
   2217 			sc->sc_flags |= WM_F_BUS64;
   2218 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2219 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2220 
   2221 			sc->sc_flags |= WM_F_PCIX;
   2222 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2223 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2224 				aprint_error_dev(sc->sc_dev,
   2225 				    "unable to find PCIX capability\n");
   2226 			else if (sc->sc_type != WM_T_82545_3 &&
   2227 				 sc->sc_type != WM_T_82546_3) {
   2228 				/*
   2229 				 * Work around a problem caused by the BIOS
   2230 				 * setting the max memory read byte count
   2231 				 * incorrectly.
   2232 				 */
   2233 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2234 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2235 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2236 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2237 
   2238 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2239 				    PCIX_CMD_BYTECNT_SHIFT;
   2240 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2241 				    PCIX_STATUS_MAXB_SHIFT;
   2242 				if (bytecnt > maxb) {
   2243 					aprint_verbose_dev(sc->sc_dev,
   2244 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2245 					    512 << bytecnt, 512 << maxb);
   2246 					pcix_cmd = (pcix_cmd &
   2247 					    ~PCIX_CMD_BYTECNT_MASK) |
   2248 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2249 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2250 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2251 					    pcix_cmd);
   2252 				}
   2253 			}
   2254 		}
   2255 		/*
   2256 		 * The quad port adapter is special; it has a PCIX-PCIX
   2257 		 * bridge on the board, and can run the secondary bus at
   2258 		 * a higher speed.
   2259 		 */
   2260 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2261 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2262 								      : 66;
   2263 		} else if (sc->sc_flags & WM_F_PCIX) {
   2264 			switch (reg & STATUS_PCIXSPD_MASK) {
   2265 			case STATUS_PCIXSPD_50_66:
   2266 				sc->sc_bus_speed = 66;
   2267 				break;
   2268 			case STATUS_PCIXSPD_66_100:
   2269 				sc->sc_bus_speed = 100;
   2270 				break;
   2271 			case STATUS_PCIXSPD_100_133:
   2272 				sc->sc_bus_speed = 133;
   2273 				break;
   2274 			default:
   2275 				aprint_error_dev(sc->sc_dev,
   2276 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2277 				    reg & STATUS_PCIXSPD_MASK);
   2278 				sc->sc_bus_speed = 66;
   2279 				break;
   2280 			}
   2281 		} else
   2282 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2283 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2284 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2285 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2286 	}
   2287 
   2288 	/* clear interesting stat counters */
   2289 	CSR_READ(sc, WMREG_COLC);
   2290 	CSR_READ(sc, WMREG_RXERRC);
   2291 
   2292 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2293 	    || (sc->sc_type >= WM_T_ICH8))
   2294 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2295 	if (sc->sc_type >= WM_T_ICH8)
   2296 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2297 
   2298 	/* Set PHY, NVM mutex related stuff */
   2299 	switch (sc->sc_type) {
   2300 	case WM_T_82542_2_0:
   2301 	case WM_T_82542_2_1:
   2302 	case WM_T_82543:
   2303 	case WM_T_82544:
   2304 		/* Microwire */
   2305 		sc->nvm.read = wm_nvm_read_uwire;
   2306 		sc->sc_nvm_wordsize = 64;
   2307 		sc->sc_nvm_addrbits = 6;
   2308 		break;
   2309 	case WM_T_82540:
   2310 	case WM_T_82545:
   2311 	case WM_T_82545_3:
   2312 	case WM_T_82546:
   2313 	case WM_T_82546_3:
   2314 		/* Microwire */
   2315 		sc->nvm.read = wm_nvm_read_uwire;
   2316 		reg = CSR_READ(sc, WMREG_EECD);
   2317 		if (reg & EECD_EE_SIZE) {
   2318 			sc->sc_nvm_wordsize = 256;
   2319 			sc->sc_nvm_addrbits = 8;
   2320 		} else {
   2321 			sc->sc_nvm_wordsize = 64;
   2322 			sc->sc_nvm_addrbits = 6;
   2323 		}
   2324 		sc->sc_flags |= WM_F_LOCK_EECD;
   2325 		sc->nvm.acquire = wm_get_eecd;
   2326 		sc->nvm.release = wm_put_eecd;
   2327 		break;
   2328 	case WM_T_82541:
   2329 	case WM_T_82541_2:
   2330 	case WM_T_82547:
   2331 	case WM_T_82547_2:
   2332 		reg = CSR_READ(sc, WMREG_EECD);
   2333 		/*
   2334 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2335 		 * on 8254[17], so set flags and functios before calling it.
   2336 		 */
   2337 		sc->sc_flags |= WM_F_LOCK_EECD;
   2338 		sc->nvm.acquire = wm_get_eecd;
   2339 		sc->nvm.release = wm_put_eecd;
   2340 		if (reg & EECD_EE_TYPE) {
   2341 			/* SPI */
   2342 			sc->nvm.read = wm_nvm_read_spi;
   2343 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2344 			wm_nvm_set_addrbits_size_eecd(sc);
   2345 		} else {
   2346 			/* Microwire */
   2347 			sc->nvm.read = wm_nvm_read_uwire;
   2348 			if ((reg & EECD_EE_ABITS) != 0) {
   2349 				sc->sc_nvm_wordsize = 256;
   2350 				sc->sc_nvm_addrbits = 8;
   2351 			} else {
   2352 				sc->sc_nvm_wordsize = 64;
   2353 				sc->sc_nvm_addrbits = 6;
   2354 			}
   2355 		}
   2356 		break;
   2357 	case WM_T_82571:
   2358 	case WM_T_82572:
   2359 		/* SPI */
   2360 		sc->nvm.read = wm_nvm_read_eerd;
   2361 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2362 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2363 		wm_nvm_set_addrbits_size_eecd(sc);
   2364 		sc->phy.acquire = wm_get_swsm_semaphore;
   2365 		sc->phy.release = wm_put_swsm_semaphore;
   2366 		sc->nvm.acquire = wm_get_nvm_82571;
   2367 		sc->nvm.release = wm_put_nvm_82571;
   2368 		break;
   2369 	case WM_T_82573:
   2370 	case WM_T_82574:
   2371 	case WM_T_82583:
   2372 		sc->nvm.read = wm_nvm_read_eerd;
   2373 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2374 		if (sc->sc_type == WM_T_82573) {
   2375 			sc->phy.acquire = wm_get_swsm_semaphore;
   2376 			sc->phy.release = wm_put_swsm_semaphore;
   2377 			sc->nvm.acquire = wm_get_nvm_82571;
   2378 			sc->nvm.release = wm_put_nvm_82571;
   2379 		} else {
   2380 			/* Both PHY and NVM use the same semaphore. */
   2381 			sc->phy.acquire = sc->nvm.acquire
   2382 			    = wm_get_swfwhw_semaphore;
   2383 			sc->phy.release = sc->nvm.release
   2384 			    = wm_put_swfwhw_semaphore;
   2385 		}
   2386 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2387 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2388 			sc->sc_nvm_wordsize = 2048;
   2389 		} else {
   2390 			/* SPI */
   2391 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2392 			wm_nvm_set_addrbits_size_eecd(sc);
   2393 		}
   2394 		break;
   2395 	case WM_T_82575:
   2396 	case WM_T_82576:
   2397 	case WM_T_82580:
   2398 	case WM_T_I350:
   2399 	case WM_T_I354:
   2400 	case WM_T_80003:
   2401 		/* SPI */
   2402 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2403 		wm_nvm_set_addrbits_size_eecd(sc);
   2404 		if ((sc->sc_type == WM_T_80003)
   2405 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2406 			sc->nvm.read = wm_nvm_read_eerd;
   2407 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2408 		} else {
   2409 			sc->nvm.read = wm_nvm_read_spi;
   2410 			sc->sc_flags |= WM_F_LOCK_EECD;
   2411 		}
   2412 		sc->phy.acquire = wm_get_phy_82575;
   2413 		sc->phy.release = wm_put_phy_82575;
   2414 		sc->nvm.acquire = wm_get_nvm_80003;
   2415 		sc->nvm.release = wm_put_nvm_80003;
   2416 		break;
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 		sc->nvm.read = wm_nvm_read_ich8;
   2424 		/* FLASH */
   2425 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2426 		sc->sc_nvm_wordsize = 2048;
   2427 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2428 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2429 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2430 			aprint_error_dev(sc->sc_dev,
   2431 			    "can't map FLASH registers\n");
   2432 			goto out;
   2433 		}
   2434 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2435 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2436 		    ICH_FLASH_SECTOR_SIZE;
   2437 		sc->sc_ich8_flash_bank_size =
   2438 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2439 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2440 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2441 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2442 		sc->sc_flashreg_offset = 0;
   2443 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2444 		sc->phy.release = wm_put_swflag_ich8lan;
   2445 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2446 		sc->nvm.release = wm_put_nvm_ich8lan;
   2447 		break;
   2448 	case WM_T_PCH_SPT:
   2449 	case WM_T_PCH_CNP:
   2450 		sc->nvm.read = wm_nvm_read_spt;
   2451 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2452 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2453 		sc->sc_flasht = sc->sc_st;
   2454 		sc->sc_flashh = sc->sc_sh;
   2455 		sc->sc_ich8_flash_base = 0;
   2456 		sc->sc_nvm_wordsize =
   2457 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2458 		    * NVM_SIZE_MULTIPLIER;
   2459 		/* It is size in bytes, we want words */
   2460 		sc->sc_nvm_wordsize /= 2;
   2461 		/* Assume 2 banks */
   2462 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2463 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2464 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2465 		sc->phy.release = wm_put_swflag_ich8lan;
   2466 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2467 		sc->nvm.release = wm_put_nvm_ich8lan;
   2468 		break;
   2469 	case WM_T_I210:
   2470 	case WM_T_I211:
   2471 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2472 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2473 		if (wm_nvm_flash_presence_i210(sc)) {
   2474 			sc->nvm.read = wm_nvm_read_eerd;
   2475 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2476 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2477 			wm_nvm_set_addrbits_size_eecd(sc);
   2478 		} else {
   2479 			sc->nvm.read = wm_nvm_read_invm;
   2480 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2481 			sc->sc_nvm_wordsize = INVM_SIZE;
   2482 		}
   2483 		sc->phy.acquire = wm_get_phy_82575;
   2484 		sc->phy.release = wm_put_phy_82575;
   2485 		sc->nvm.acquire = wm_get_nvm_80003;
   2486 		sc->nvm.release = wm_put_nvm_80003;
   2487 		break;
   2488 	default:
   2489 		break;
   2490 	}
   2491 
   2492 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2493 	switch (sc->sc_type) {
   2494 	case WM_T_82571:
   2495 	case WM_T_82572:
   2496 		reg = CSR_READ(sc, WMREG_SWSM2);
   2497 		if ((reg & SWSM2_LOCK) == 0) {
   2498 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2499 			force_clear_smbi = true;
   2500 		} else
   2501 			force_clear_smbi = false;
   2502 		break;
   2503 	case WM_T_82573:
   2504 	case WM_T_82574:
   2505 	case WM_T_82583:
   2506 		force_clear_smbi = true;
   2507 		break;
   2508 	default:
   2509 		force_clear_smbi = false;
   2510 		break;
   2511 	}
   2512 	if (force_clear_smbi) {
   2513 		reg = CSR_READ(sc, WMREG_SWSM);
   2514 		if ((reg & SWSM_SMBI) != 0)
   2515 			aprint_error_dev(sc->sc_dev,
   2516 			    "Please update the Bootagent\n");
   2517 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2518 	}
   2519 
   2520 	/*
   2521 	 * Defer printing the EEPROM type until after verifying the checksum
   2522 	 * This allows the EEPROM type to be printed correctly in the case
   2523 	 * that no EEPROM is attached.
   2524 	 */
   2525 	/*
   2526 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2527 	 * this for later, so we can fail future reads from the EEPROM.
   2528 	 */
   2529 	if (wm_nvm_validate_checksum(sc)) {
   2530 		/*
   2531 		 * Read twice again because some PCI-e parts fail the
   2532 		 * first check due to the link being in sleep state.
   2533 		 */
   2534 		if (wm_nvm_validate_checksum(sc))
   2535 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2536 	}
   2537 
   2538 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2539 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2540 	else {
   2541 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2542 		    sc->sc_nvm_wordsize);
   2543 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2544 			aprint_verbose("iNVM");
   2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2546 			aprint_verbose("FLASH(HW)");
   2547 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2548 			aprint_verbose("FLASH");
   2549 		else {
   2550 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2551 				eetype = "SPI";
   2552 			else
   2553 				eetype = "MicroWire";
   2554 			aprint_verbose("(%d address bits) %s EEPROM",
   2555 			    sc->sc_nvm_addrbits, eetype);
   2556 		}
   2557 	}
   2558 	wm_nvm_version(sc);
   2559 	aprint_verbose("\n");
   2560 
   2561 	/*
   2562 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2563 	 * incorrect.
   2564 	 */
   2565 	wm_gmii_setup_phytype(sc, 0, 0);
   2566 
   2567 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2568 	switch (sc->sc_type) {
   2569 	case WM_T_ICH8:
   2570 	case WM_T_ICH9:
   2571 	case WM_T_ICH10:
   2572 	case WM_T_PCH:
   2573 	case WM_T_PCH2:
   2574 	case WM_T_PCH_LPT:
   2575 	case WM_T_PCH_SPT:
   2576 	case WM_T_PCH_CNP:
   2577 		apme_mask = WUC_APME;
   2578 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2579 		if ((eeprom_data & apme_mask) != 0)
   2580 			sc->sc_flags |= WM_F_WOL;
   2581 		break;
   2582 	default:
   2583 		break;
   2584 	}
   2585 
   2586 	/* Reset the chip to a known state. */
   2587 	wm_reset(sc);
   2588 
   2589 	/*
   2590 	 * Check for I21[01] PLL workaround.
   2591 	 *
   2592 	 * Three cases:
   2593 	 * a) Chip is I211.
   2594 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2595 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2596 	 */
   2597 	if (sc->sc_type == WM_T_I211)
   2598 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2599 	if (sc->sc_type == WM_T_I210) {
   2600 		if (!wm_nvm_flash_presence_i210(sc))
   2601 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2602 		else if ((sc->sc_nvm_ver_major < 3)
   2603 		    || ((sc->sc_nvm_ver_major == 3)
   2604 			&& (sc->sc_nvm_ver_minor < 25))) {
   2605 			aprint_verbose_dev(sc->sc_dev,
   2606 			    "ROM image version %d.%d is older than 3.25\n",
   2607 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2608 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2609 		}
   2610 	}
   2611 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2612 		wm_pll_workaround_i210(sc);
   2613 
   2614 	wm_get_wakeup(sc);
   2615 
   2616 	/* Non-AMT based hardware can now take control from firmware */
   2617 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2618 		wm_get_hw_control(sc);
   2619 
   2620 	/*
   2621 	 * Read the Ethernet address from the EEPROM, if not first found
   2622 	 * in device properties.
   2623 	 */
   2624 	ea = prop_dictionary_get(dict, "mac-address");
   2625 	if (ea != NULL) {
   2626 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2627 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2628 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2629 	} else {
   2630 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2631 			aprint_error_dev(sc->sc_dev,
   2632 			    "unable to read Ethernet address\n");
   2633 			goto out;
   2634 		}
   2635 	}
   2636 
   2637 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2638 	    ether_sprintf(enaddr));
   2639 
   2640 	/*
   2641 	 * Read the config info from the EEPROM, and set up various
   2642 	 * bits in the control registers based on their contents.
   2643 	 */
   2644 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2645 	if (pn != NULL) {
   2646 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2647 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2648 	} else {
   2649 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2650 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2651 			goto out;
   2652 		}
   2653 	}
   2654 
   2655 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2656 	if (pn != NULL) {
   2657 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2658 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2659 	} else {
   2660 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2661 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2662 			goto out;
   2663 		}
   2664 	}
   2665 
   2666 	/* check for WM_F_WOL */
   2667 	switch (sc->sc_type) {
   2668 	case WM_T_82542_2_0:
   2669 	case WM_T_82542_2_1:
   2670 	case WM_T_82543:
   2671 		/* dummy? */
   2672 		eeprom_data = 0;
   2673 		apme_mask = NVM_CFG3_APME;
   2674 		break;
   2675 	case WM_T_82544:
   2676 		apme_mask = NVM_CFG2_82544_APM_EN;
   2677 		eeprom_data = cfg2;
   2678 		break;
   2679 	case WM_T_82546:
   2680 	case WM_T_82546_3:
   2681 	case WM_T_82571:
   2682 	case WM_T_82572:
   2683 	case WM_T_82573:
   2684 	case WM_T_82574:
   2685 	case WM_T_82583:
   2686 	case WM_T_80003:
   2687 	case WM_T_82575:
   2688 	case WM_T_82576:
   2689 		apme_mask = NVM_CFG3_APME;
   2690 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2691 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2692 		break;
   2693 	case WM_T_82580:
   2694 	case WM_T_I350:
   2695 	case WM_T_I354:
   2696 	case WM_T_I210:
   2697 	case WM_T_I211:
   2698 		apme_mask = NVM_CFG3_APME;
   2699 		wm_nvm_read(sc,
   2700 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2701 		    1, &eeprom_data);
   2702 		break;
   2703 	case WM_T_ICH8:
   2704 	case WM_T_ICH9:
   2705 	case WM_T_ICH10:
   2706 	case WM_T_PCH:
   2707 	case WM_T_PCH2:
   2708 	case WM_T_PCH_LPT:
   2709 	case WM_T_PCH_SPT:
   2710 	case WM_T_PCH_CNP:
   2711 		/* Already checked before wm_reset () */
   2712 		apme_mask = eeprom_data = 0;
   2713 		break;
   2714 	default: /* XXX 82540 */
   2715 		apme_mask = NVM_CFG3_APME;
   2716 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2717 		break;
   2718 	}
   2719 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2720 	if ((eeprom_data & apme_mask) != 0)
   2721 		sc->sc_flags |= WM_F_WOL;
   2722 
   2723 	/*
   2724 	 * We have the eeprom settings, now apply the special cases
   2725 	 * where the eeprom may be wrong or the board won't support
   2726 	 * wake on lan on a particular port
   2727 	 */
   2728 	switch (sc->sc_pcidevid) {
   2729 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2730 		sc->sc_flags &= ~WM_F_WOL;
   2731 		break;
   2732 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2733 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2734 		/* Wake events only supported on port A for dual fiber
   2735 		 * regardless of eeprom setting */
   2736 		if (sc->sc_funcid == 1)
   2737 			sc->sc_flags &= ~WM_F_WOL;
   2738 		break;
   2739 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2740 		/* If quad port adapter, disable WoL on all but port A */
   2741 		if (sc->sc_funcid != 0)
   2742 			sc->sc_flags &= ~WM_F_WOL;
   2743 		break;
   2744 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2745 		/* Wake events only supported on port A for dual fiber
   2746 		 * regardless of eeprom setting */
   2747 		if (sc->sc_funcid == 1)
   2748 			sc->sc_flags &= ~WM_F_WOL;
   2749 		break;
   2750 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2751 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2752 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2753 		/* If quad port adapter, disable WoL on all but port A */
   2754 		if (sc->sc_funcid != 0)
   2755 			sc->sc_flags &= ~WM_F_WOL;
   2756 		break;
   2757 	}
   2758 
   2759 	if (sc->sc_type >= WM_T_82575) {
   2760 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2761 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2762 			    nvmword);
   2763 			if ((sc->sc_type == WM_T_82575) ||
   2764 			    (sc->sc_type == WM_T_82576)) {
   2765 				/* Check NVM for autonegotiation */
   2766 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2767 				    != 0)
   2768 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2769 			}
   2770 			if ((sc->sc_type == WM_T_82575) ||
   2771 			    (sc->sc_type == WM_T_I350)) {
   2772 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2773 					sc->sc_flags |= WM_F_MAS;
   2774 			}
   2775 		}
   2776 	}
   2777 
   2778 	/*
   2779 	 * XXX need special handling for some multiple port cards
   2780 	 * to disable a paticular port.
   2781 	 */
   2782 
   2783 	if (sc->sc_type >= WM_T_82544) {
   2784 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2785 		if (pn != NULL) {
   2786 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2787 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2788 		} else {
   2789 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2790 				aprint_error_dev(sc->sc_dev,
   2791 				    "unable to read SWDPIN\n");
   2792 				goto out;
   2793 			}
   2794 		}
   2795 	}
   2796 
   2797 	if (cfg1 & NVM_CFG1_ILOS)
   2798 		sc->sc_ctrl |= CTRL_ILOS;
   2799 
   2800 	/*
   2801 	 * XXX
   2802 	 * This code isn't correct because pin 2 and 3 are located
   2803 	 * in different position on newer chips. Check all datasheet.
   2804 	 *
   2805 	 * Until resolve this problem, check if a chip < 82580
   2806 	 */
   2807 	if (sc->sc_type <= WM_T_82580) {
   2808 		if (sc->sc_type >= WM_T_82544) {
   2809 			sc->sc_ctrl |=
   2810 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2811 			    CTRL_SWDPIO_SHIFT;
   2812 			sc->sc_ctrl |=
   2813 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2814 			    CTRL_SWDPINS_SHIFT;
   2815 		} else {
   2816 			sc->sc_ctrl |=
   2817 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2818 			    CTRL_SWDPIO_SHIFT;
   2819 		}
   2820 	}
   2821 
   2822 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2823 		wm_nvm_read(sc,
   2824 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2825 		    1, &nvmword);
   2826 		if (nvmword & NVM_CFG3_ILOS)
   2827 			sc->sc_ctrl |= CTRL_ILOS;
   2828 	}
   2829 
   2830 #if 0
   2831 	if (sc->sc_type >= WM_T_82544) {
   2832 		if (cfg1 & NVM_CFG1_IPS0)
   2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2834 		if (cfg1 & NVM_CFG1_IPS1)
   2835 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2836 		sc->sc_ctrl_ext |=
   2837 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2838 		    CTRL_EXT_SWDPIO_SHIFT;
   2839 		sc->sc_ctrl_ext |=
   2840 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2841 		    CTRL_EXT_SWDPINS_SHIFT;
   2842 	} else {
   2843 		sc->sc_ctrl_ext |=
   2844 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2845 		    CTRL_EXT_SWDPIO_SHIFT;
   2846 	}
   2847 #endif
   2848 
   2849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2850 #if 0
   2851 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2852 #endif
   2853 
   2854 	if (sc->sc_type == WM_T_PCH) {
   2855 		uint16_t val;
   2856 
   2857 		/* Save the NVM K1 bit setting */
   2858 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2859 
   2860 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2861 			sc->sc_nvm_k1_enabled = 1;
   2862 		else
   2863 			sc->sc_nvm_k1_enabled = 0;
   2864 	}
   2865 
   2866 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2867 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2868 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2869 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2870 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2871 	    || sc->sc_type == WM_T_82573
   2872 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2873 		/* Copper only */
   2874 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2875 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2876 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2877 	    || (sc->sc_type ==WM_T_I211)) {
   2878 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2879 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2880 		switch (link_mode) {
   2881 		case CTRL_EXT_LINK_MODE_1000KX:
   2882 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2883 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2884 			break;
   2885 		case CTRL_EXT_LINK_MODE_SGMII:
   2886 			if (wm_sgmii_uses_mdio(sc)) {
   2887 				aprint_normal_dev(sc->sc_dev,
   2888 				    "SGMII(MDIO)\n");
   2889 				sc->sc_flags |= WM_F_SGMII;
   2890 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2891 				break;
   2892 			}
   2893 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2894 			/*FALLTHROUGH*/
   2895 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2896 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2897 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2898 				if (link_mode
   2899 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2900 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2901 					sc->sc_flags |= WM_F_SGMII;
   2902 					aprint_verbose_dev(sc->sc_dev,
   2903 					    "SGMII\n");
   2904 				} else {
   2905 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2906 					aprint_verbose_dev(sc->sc_dev,
   2907 					    "SERDES\n");
   2908 				}
   2909 				break;
   2910 			}
   2911 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2912 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2913 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2914 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2915 				sc->sc_flags |= WM_F_SGMII;
   2916 			}
   2917 			/* Do not change link mode for 100BaseFX */
   2918 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2919 				break;
   2920 
   2921 			/* Change current link mode setting */
   2922 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2923 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2924 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2925 			else
   2926 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2928 			break;
   2929 		case CTRL_EXT_LINK_MODE_GMII:
   2930 		default:
   2931 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2932 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2933 			break;
   2934 		}
   2935 
   2936 		reg &= ~CTRL_EXT_I2C_ENA;
   2937 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2938 			reg |= CTRL_EXT_I2C_ENA;
   2939 		else
   2940 			reg &= ~CTRL_EXT_I2C_ENA;
   2941 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2942 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2943 			if (!wm_sgmii_uses_mdio(sc))
   2944 				wm_gmii_setup_phytype(sc, 0, 0);
   2945 			wm_reset_mdicnfg_82580(sc);
   2946 		}
   2947 	} else if (sc->sc_type < WM_T_82543 ||
   2948 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2949 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2950 			aprint_error_dev(sc->sc_dev,
   2951 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2952 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2953 		}
   2954 	} else {
   2955 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2956 			aprint_error_dev(sc->sc_dev,
   2957 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2958 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2959 		}
   2960 	}
   2961 
   2962 	if (sc->sc_type >= WM_T_PCH2)
   2963 		sc->sc_flags |= WM_F_EEE;
   2964 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2965 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2966 		/* XXX: Need special handling for I354. (not yet) */
   2967 		if (sc->sc_type != WM_T_I354)
   2968 			sc->sc_flags |= WM_F_EEE;
   2969 	}
   2970 
   2971 	/*
   2972 	 * The I350 has a bug where it always strips the CRC whether
   2973 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2974 	 */
   2975 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2976 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2977 		sc->sc_flags |= WM_F_CRC_STRIP;
   2978 
   2979 	/* Set device properties (macflags) */
   2980 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2981 
   2982 	if (sc->sc_flags != 0) {
   2983 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2984 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2985 	}
   2986 
   2987 #ifdef WM_MPSAFE
   2988 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2989 #else
   2990 	sc->sc_core_lock = NULL;
   2991 #endif
   2992 
   2993 	/* Initialize the media structures accordingly. */
   2994 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2995 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2996 	else
   2997 		wm_tbi_mediainit(sc); /* All others */
   2998 
   2999 	ifp = &sc->sc_ethercom.ec_if;
   3000 	xname = device_xname(sc->sc_dev);
   3001 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3002 	ifp->if_softc = sc;
   3003 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3004 #ifdef WM_MPSAFE
   3005 	ifp->if_extflags = IFEF_MPSAFE;
   3006 #endif
   3007 	ifp->if_ioctl = wm_ioctl;
   3008 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3009 		ifp->if_start = wm_nq_start;
   3010 		/*
   3011 		 * When the number of CPUs is one and the controller can use
   3012 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   3013 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   3014 		 * and the other is used for link status changing.
   3015 		 * In this situation, wm_nq_transmit() is disadvantageous
   3016 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3017 		 */
   3018 		if (wm_is_using_multiqueue(sc))
   3019 			ifp->if_transmit = wm_nq_transmit;
   3020 	} else {
   3021 		ifp->if_start = wm_start;
   3022 		/*
   3023 		 * wm_transmit() has the same disadvantage as wm_transmit().
   3024 		 */
   3025 		if (wm_is_using_multiqueue(sc))
   3026 			ifp->if_transmit = wm_transmit;
   3027 	}
   3028 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
   3029 	ifp->if_init = wm_init;
   3030 	ifp->if_stop = wm_stop;
   3031 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3032 	IFQ_SET_READY(&ifp->if_snd);
   3033 
   3034 	/* Check for jumbo frame */
   3035 	switch (sc->sc_type) {
   3036 	case WM_T_82573:
   3037 		/* XXX limited to 9234 if ASPM is disabled */
   3038 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3039 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3040 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3041 		break;
   3042 	case WM_T_82571:
   3043 	case WM_T_82572:
   3044 	case WM_T_82574:
   3045 	case WM_T_82583:
   3046 	case WM_T_82575:
   3047 	case WM_T_82576:
   3048 	case WM_T_82580:
   3049 	case WM_T_I350:
   3050 	case WM_T_I354:
   3051 	case WM_T_I210:
   3052 	case WM_T_I211:
   3053 	case WM_T_80003:
   3054 	case WM_T_ICH9:
   3055 	case WM_T_ICH10:
   3056 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3057 	case WM_T_PCH_LPT:
   3058 	case WM_T_PCH_SPT:
   3059 	case WM_T_PCH_CNP:
   3060 		/* XXX limited to 9234 */
   3061 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3062 		break;
   3063 	case WM_T_PCH:
   3064 		/* XXX limited to 4096 */
   3065 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3066 		break;
   3067 	case WM_T_82542_2_0:
   3068 	case WM_T_82542_2_1:
   3069 	case WM_T_ICH8:
   3070 		/* No support for jumbo frame */
   3071 		break;
   3072 	default:
   3073 		/* ETHER_MAX_LEN_JUMBO */
   3074 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3075 		break;
   3076 	}
   3077 
   3078 	/* If we're a i82543 or greater, we can support VLANs. */
   3079 	if (sc->sc_type >= WM_T_82543) {
   3080 		sc->sc_ethercom.ec_capabilities |=
   3081 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3082 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3083 	}
   3084 
   3085 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3086 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3087 
   3088 	/*
   3089 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   3090 	 * on i82543 and later.
   3091 	 */
   3092 	if (sc->sc_type >= WM_T_82543) {
   3093 		ifp->if_capabilities |=
   3094 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3095 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3096 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3097 		    IFCAP_CSUM_TCPv6_Tx |
   3098 		    IFCAP_CSUM_UDPv6_Tx;
   3099 	}
   3100 
   3101 	/*
   3102 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3103 	 *
   3104 	 *	82541GI (8086:1076) ... no
   3105 	 *	82572EI (8086:10b9) ... yes
   3106 	 */
   3107 	if (sc->sc_type >= WM_T_82571) {
   3108 		ifp->if_capabilities |=
   3109 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3110 	}
   3111 
   3112 	/*
   3113 	 * If we're a i82544 or greater (except i82547), we can do
   3114 	 * TCP segmentation offload.
   3115 	 */
   3116 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3117 		ifp->if_capabilities |= IFCAP_TSOv4;
   3118 	}
   3119 
   3120 	if (sc->sc_type >= WM_T_82571) {
   3121 		ifp->if_capabilities |= IFCAP_TSOv6;
   3122 	}
   3123 
   3124 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3125 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3126 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3127 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3128 
   3129 	/* Attach the interface. */
   3130 	if_initialize(ifp);
   3131 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3132 	ether_ifattach(ifp, enaddr);
   3133 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3134 	if_register(ifp);
   3135 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3136 	    RND_FLAG_DEFAULT);
   3137 
   3138 #ifdef WM_EVENT_COUNTERS
   3139 	/* Attach event counters. */
   3140 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3141 	    NULL, xname, "linkintr");
   3142 
   3143 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3144 	    NULL, xname, "tx_xoff");
   3145 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3146 	    NULL, xname, "tx_xon");
   3147 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3148 	    NULL, xname, "rx_xoff");
   3149 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3150 	    NULL, xname, "rx_xon");
   3151 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3152 	    NULL, xname, "rx_macctl");
   3153 #endif /* WM_EVENT_COUNTERS */
   3154 
   3155 	sc->sc_txrx_use_workqueue = false;
   3156 
   3157 	if (wm_phy_need_linkdown_discard(sc))
   3158 		wm_set_linkdown_discard(sc);
   3159 
   3160 	wm_init_sysctls(sc);
   3161 
   3162 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3163 		pmf_class_network_register(self, ifp);
   3164 	else
   3165 		aprint_error_dev(self, "couldn't establish power handler\n");
   3166 
   3167 	sc->sc_flags |= WM_F_ATTACHED;
   3168 out:
   3169 	return;
   3170 }
   3171 
   3172 /* The detach function (ca_detach) */
   3173 static int
   3174 wm_detach(device_t self, int flags __unused)
   3175 {
   3176 	struct wm_softc *sc = device_private(self);
   3177 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3178 	int i;
   3179 
   3180 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3181 		return 0;
   3182 
   3183 	/* Stop the interface. Callouts are stopped in it. */
   3184 	wm_stop(ifp, 1);
   3185 
   3186 	pmf_device_deregister(self);
   3187 
   3188 	sysctl_teardown(&sc->sc_sysctllog);
   3189 
   3190 #ifdef WM_EVENT_COUNTERS
   3191 	evcnt_detach(&sc->sc_ev_linkintr);
   3192 
   3193 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3194 	evcnt_detach(&sc->sc_ev_tx_xon);
   3195 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3196 	evcnt_detach(&sc->sc_ev_rx_xon);
   3197 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3198 #endif /* WM_EVENT_COUNTERS */
   3199 
   3200 	rnd_detach_source(&sc->rnd_source);
   3201 
   3202 	/* Tell the firmware about the release */
   3203 	WM_CORE_LOCK(sc);
   3204 	wm_release_manageability(sc);
   3205 	wm_release_hw_control(sc);
   3206 	wm_enable_wakeup(sc);
   3207 	WM_CORE_UNLOCK(sc);
   3208 
   3209 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3210 
   3211 	ether_ifdetach(ifp);
   3212 	if_detach(ifp);
   3213 	if_percpuq_destroy(sc->sc_ipq);
   3214 
   3215 	/* Delete all remaining media. */
   3216 	ifmedia_fini(&sc->sc_mii.mii_media);
   3217 
   3218 	/* Unload RX dmamaps and free mbufs */
   3219 	for (i = 0; i < sc->sc_nqueues; i++) {
   3220 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3221 		mutex_enter(rxq->rxq_lock);
   3222 		wm_rxdrain(rxq);
   3223 		mutex_exit(rxq->rxq_lock);
   3224 	}
   3225 	/* Must unlock here */
   3226 
   3227 	/* Disestablish the interrupt handler */
   3228 	for (i = 0; i < sc->sc_nintrs; i++) {
   3229 		if (sc->sc_ihs[i] != NULL) {
   3230 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3231 			sc->sc_ihs[i] = NULL;
   3232 		}
   3233 	}
   3234 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3235 
   3236 	/* wm_stop() ensure workqueue is stopped. */
   3237 	workqueue_destroy(sc->sc_queue_wq);
   3238 
   3239 	for (i = 0; i < sc->sc_nqueues; i++)
   3240 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3241 
   3242 	wm_free_txrx_queues(sc);
   3243 
   3244 	/* Unmap the registers */
   3245 	if (sc->sc_ss) {
   3246 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3247 		sc->sc_ss = 0;
   3248 	}
   3249 	if (sc->sc_ios) {
   3250 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3251 		sc->sc_ios = 0;
   3252 	}
   3253 	if (sc->sc_flashs) {
   3254 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3255 		sc->sc_flashs = 0;
   3256 	}
   3257 
   3258 	if (sc->sc_core_lock)
   3259 		mutex_obj_free(sc->sc_core_lock);
   3260 	if (sc->sc_ich_phymtx)
   3261 		mutex_obj_free(sc->sc_ich_phymtx);
   3262 	if (sc->sc_ich_nvmmtx)
   3263 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3264 
   3265 	return 0;
   3266 }
   3267 
   3268 static bool
   3269 wm_suspend(device_t self, const pmf_qual_t *qual)
   3270 {
   3271 	struct wm_softc *sc = device_private(self);
   3272 
   3273 	wm_release_manageability(sc);
   3274 	wm_release_hw_control(sc);
   3275 	wm_enable_wakeup(sc);
   3276 
   3277 	return true;
   3278 }
   3279 
   3280 static bool
   3281 wm_resume(device_t self, const pmf_qual_t *qual)
   3282 {
   3283 	struct wm_softc *sc = device_private(self);
   3284 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3285 	pcireg_t reg;
   3286 	char buf[256];
   3287 
   3288 	reg = CSR_READ(sc, WMREG_WUS);
   3289 	if (reg != 0) {
   3290 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3291 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3292 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3293 	}
   3294 
   3295 	if (sc->sc_type >= WM_T_PCH2)
   3296 		wm_resume_workarounds_pchlan(sc);
   3297 	if ((ifp->if_flags & IFF_UP) == 0) {
   3298 		/* >= PCH_SPT hardware workaround before reset. */
   3299 		if (sc->sc_type >= WM_T_PCH_SPT)
   3300 			wm_flush_desc_rings(sc);
   3301 
   3302 		wm_reset(sc);
   3303 		/* Non-AMT based hardware can now take control from firmware */
   3304 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3305 			wm_get_hw_control(sc);
   3306 		wm_init_manageability(sc);
   3307 	} else {
   3308 		/*
   3309 		 * We called pmf_class_network_register(), so if_init() is
   3310 		 * automatically called when IFF_UP. wm_reset(),
   3311 		 * wm_get_hw_control() and wm_init_manageability() are called
   3312 		 * via wm_init().
   3313 		 */
   3314 	}
   3315 
   3316 	return true;
   3317 }
   3318 
   3319 /*
   3320  * wm_watchdog:		[ifnet interface function]
   3321  *
   3322  *	Watchdog timer handler.
   3323  */
   3324 static void
   3325 wm_watchdog(struct ifnet *ifp)
   3326 {
   3327 	int qid;
   3328 	struct wm_softc *sc = ifp->if_softc;
   3329 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3330 
   3331 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3332 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3333 
   3334 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3335 	}
   3336 
   3337 	/* IF any of queues hanged up, reset the interface. */
   3338 	if (hang_queue != 0) {
   3339 		(void)wm_init(ifp);
   3340 
   3341 		/*
   3342 		 * There are still some upper layer processing which call
   3343 		 * ifp->if_start(). e.g. ALTQ or one CPU system
   3344 		 */
   3345 		/* Try to get more packets going. */
   3346 		ifp->if_start(ifp);
   3347 	}
   3348 }
   3349 
   3350 
   3351 static void
   3352 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3353 {
   3354 
   3355 	mutex_enter(txq->txq_lock);
   3356 	if (txq->txq_sending &&
   3357 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3358 		wm_watchdog_txq_locked(ifp, txq, hang);
   3359 
   3360 	mutex_exit(txq->txq_lock);
   3361 }
   3362 
   3363 static void
   3364 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3365     uint16_t *hang)
   3366 {
   3367 	struct wm_softc *sc = ifp->if_softc;
   3368 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3369 
   3370 	KASSERT(mutex_owned(txq->txq_lock));
   3371 
   3372 	/*
   3373 	 * Since we're using delayed interrupts, sweep up
   3374 	 * before we report an error.
   3375 	 */
   3376 	wm_txeof(txq, UINT_MAX);
   3377 
   3378 	if (txq->txq_sending)
   3379 		*hang |= __BIT(wmq->wmq_id);
   3380 
   3381 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3382 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3383 		    device_xname(sc->sc_dev));
   3384 	} else {
   3385 #ifdef WM_DEBUG
   3386 		int i, j;
   3387 		struct wm_txsoft *txs;
   3388 #endif
   3389 		log(LOG_ERR,
   3390 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3391 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3392 		    txq->txq_next);
   3393 		if_statinc(ifp, if_oerrors);
   3394 #ifdef WM_DEBUG
   3395 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3396 		    i = WM_NEXTTXS(txq, i)) {
   3397 			txs = &txq->txq_soft[i];
   3398 			printf("txs %d tx %d -> %d\n",
   3399 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3400 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3401 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3402 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3403 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3404 					printf("\t %#08x%08x\n",
   3405 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3406 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3407 				} else {
   3408 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3409 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3410 					    txq->txq_descs[j].wtx_addr.wa_low);
   3411 					printf("\t %#04x%02x%02x%08x\n",
   3412 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3413 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3414 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3415 					    txq->txq_descs[j].wtx_cmdlen);
   3416 				}
   3417 				if (j == txs->txs_lastdesc)
   3418 					break;
   3419 			}
   3420 		}
   3421 #endif
   3422 	}
   3423 }
   3424 
   3425 /*
   3426  * wm_tick:
   3427  *
   3428  *	One second timer, used to check link status, sweep up
   3429  *	completed transmit jobs, etc.
   3430  */
   3431 static void
   3432 wm_tick(void *arg)
   3433 {
   3434 	struct wm_softc *sc = arg;
   3435 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3436 #ifndef WM_MPSAFE
   3437 	int s = splnet();
   3438 #endif
   3439 
   3440 	WM_CORE_LOCK(sc);
   3441 
   3442 	if (sc->sc_core_stopping) {
   3443 		WM_CORE_UNLOCK(sc);
   3444 #ifndef WM_MPSAFE
   3445 		splx(s);
   3446 #endif
   3447 		return;
   3448 	}
   3449 
   3450 	if (sc->sc_type >= WM_T_82542_2_1) {
   3451 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3452 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3453 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3454 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3455 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3456 	}
   3457 
   3458 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3459 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3460 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3461 	    + CSR_READ(sc, WMREG_CRCERRS)
   3462 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3463 	    + CSR_READ(sc, WMREG_SYMERRC)
   3464 	    + CSR_READ(sc, WMREG_RXERRC)
   3465 	    + CSR_READ(sc, WMREG_SEC)
   3466 	    + CSR_READ(sc, WMREG_CEXTERR)
   3467 	    + CSR_READ(sc, WMREG_RLEC));
   3468 	/*
   3469 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3470 	 * memory. It does not mean the number of dropped packet. Because
   3471 	 * ethernet controller can receive packets in such case if there is
   3472 	 * space in phy's FIFO.
   3473 	 *
   3474 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3475 	 * own EVCNT instead of if_iqdrops.
   3476 	 */
   3477 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3478 	IF_STAT_PUTREF(ifp);
   3479 
   3480 	if (sc->sc_flags & WM_F_HAS_MII)
   3481 		mii_tick(&sc->sc_mii);
   3482 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3483 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3484 		wm_serdes_tick(sc);
   3485 	else
   3486 		wm_tbi_tick(sc);
   3487 
   3488 	WM_CORE_UNLOCK(sc);
   3489 
   3490 	wm_watchdog(ifp);
   3491 
   3492 	callout_schedule(&sc->sc_tick_ch, hz);
   3493 }
   3494 
   3495 static int
   3496 wm_ifflags_cb(struct ethercom *ec)
   3497 {
   3498 	struct ifnet *ifp = &ec->ec_if;
   3499 	struct wm_softc *sc = ifp->if_softc;
   3500 	u_short iffchange;
   3501 	int ecchange;
   3502 	bool needreset = false;
   3503 	int rc = 0;
   3504 
   3505 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3506 		device_xname(sc->sc_dev), __func__));
   3507 
   3508 	WM_CORE_LOCK(sc);
   3509 
   3510 	/*
   3511 	 * Check for if_flags.
   3512 	 * Main usage is to prevent linkdown when opening bpf.
   3513 	 */
   3514 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3515 	sc->sc_if_flags = ifp->if_flags;
   3516 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3517 		needreset = true;
   3518 		goto ec;
   3519 	}
   3520 
   3521 	/* iff related updates */
   3522 	if ((iffchange & IFF_PROMISC) != 0)
   3523 		wm_set_filter(sc);
   3524 
   3525 	wm_set_vlan(sc);
   3526 
   3527 ec:
   3528 	/* Check for ec_capenable. */
   3529 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3530 	sc->sc_ec_capenable = ec->ec_capenable;
   3531 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3532 		needreset = true;
   3533 		goto out;
   3534 	}
   3535 
   3536 	/* ec related updates */
   3537 	wm_set_eee(sc);
   3538 
   3539 out:
   3540 	if (needreset)
   3541 		rc = ENETRESET;
   3542 	WM_CORE_UNLOCK(sc);
   3543 
   3544 	return rc;
   3545 }
   3546 
   3547 static bool
   3548 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3549 {
   3550 
   3551 	switch (sc->sc_phytype) {
   3552 	case WMPHY_82577: /* ihphy */
   3553 	case WMPHY_82578: /* atphy */
   3554 	case WMPHY_82579: /* ihphy */
   3555 	case WMPHY_I217: /* ihphy */
   3556 	case WMPHY_82580: /* ihphy */
   3557 	case WMPHY_I350: /* ihphy */
   3558 		return true;
   3559 	default:
   3560 		return false;
   3561 	}
   3562 }
   3563 
   3564 static void
   3565 wm_set_linkdown_discard(struct wm_softc *sc)
   3566 {
   3567 
   3568 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3569 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3570 
   3571 		mutex_enter(txq->txq_lock);
   3572 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3573 		mutex_exit(txq->txq_lock);
   3574 	}
   3575 }
   3576 
   3577 static void
   3578 wm_clear_linkdown_discard(struct wm_softc *sc)
   3579 {
   3580 
   3581 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3582 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3583 
   3584 		mutex_enter(txq->txq_lock);
   3585 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3586 		mutex_exit(txq->txq_lock);
   3587 	}
   3588 }
   3589 
   3590 /*
   3591  * wm_ioctl:		[ifnet interface function]
   3592  *
   3593  *	Handle control requests from the operator.
   3594  */
   3595 static int
   3596 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3597 {
   3598 	struct wm_softc *sc = ifp->if_softc;
   3599 	struct ifreq *ifr = (struct ifreq *)data;
   3600 	struct ifaddr *ifa = (struct ifaddr *)data;
   3601 	struct sockaddr_dl *sdl;
   3602 	int s, error;
   3603 
   3604 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3605 		device_xname(sc->sc_dev), __func__));
   3606 
   3607 #ifndef WM_MPSAFE
   3608 	s = splnet();
   3609 #endif
   3610 	switch (cmd) {
   3611 	case SIOCSIFMEDIA:
   3612 		WM_CORE_LOCK(sc);
   3613 		/* Flow control requires full-duplex mode. */
   3614 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3615 		    (ifr->ifr_media & IFM_FDX) == 0)
   3616 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3617 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3618 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3619 				/* We can do both TXPAUSE and RXPAUSE. */
   3620 				ifr->ifr_media |=
   3621 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3622 			}
   3623 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3624 		}
   3625 		WM_CORE_UNLOCK(sc);
   3626 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3627 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3628 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3629 				wm_set_linkdown_discard(sc);
   3630 			else
   3631 				wm_clear_linkdown_discard(sc);
   3632 		}
   3633 		break;
   3634 	case SIOCINITIFADDR:
   3635 		WM_CORE_LOCK(sc);
   3636 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3637 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3638 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3639 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3640 			/* Unicast address is the first multicast entry */
   3641 			wm_set_filter(sc);
   3642 			error = 0;
   3643 			WM_CORE_UNLOCK(sc);
   3644 			break;
   3645 		}
   3646 		WM_CORE_UNLOCK(sc);
   3647 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3648 			wm_clear_linkdown_discard(sc);
   3649 		/*FALLTHROUGH*/
   3650 	default:
   3651 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3652 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3653 				wm_clear_linkdown_discard(sc);
   3654 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3655 				wm_set_linkdown_discard(sc);
   3656 			}
   3657 		}
   3658 #ifdef WM_MPSAFE
   3659 		s = splnet();
   3660 #endif
   3661 		/* It may call wm_start, so unlock here */
   3662 		error = ether_ioctl(ifp, cmd, data);
   3663 #ifdef WM_MPSAFE
   3664 		splx(s);
   3665 #endif
   3666 		if (error != ENETRESET)
   3667 			break;
   3668 
   3669 		error = 0;
   3670 
   3671 		if (cmd == SIOCSIFCAP)
   3672 			error = (*ifp->if_init)(ifp);
   3673 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3674 			;
   3675 		else if (ifp->if_flags & IFF_RUNNING) {
   3676 			/*
   3677 			 * Multicast list has changed; set the hardware filter
   3678 			 * accordingly.
   3679 			 */
   3680 			WM_CORE_LOCK(sc);
   3681 			wm_set_filter(sc);
   3682 			WM_CORE_UNLOCK(sc);
   3683 		}
   3684 		break;
   3685 	}
   3686 
   3687 #ifndef WM_MPSAFE
   3688 	splx(s);
   3689 #endif
   3690 	return error;
   3691 }
   3692 
   3693 /* MAC address related */
   3694 
   3695 /*
   3696  * Get the offset of MAC address and return it.
   3697  * If error occured, use offset 0.
   3698  */
   3699 static uint16_t
   3700 wm_check_alt_mac_addr(struct wm_softc *sc)
   3701 {
   3702 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3703 	uint16_t offset = NVM_OFF_MACADDR;
   3704 
   3705 	/* Try to read alternative MAC address pointer */
   3706 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3707 		return 0;
   3708 
   3709 	/* Check pointer if it's valid or not. */
   3710 	if ((offset == 0x0000) || (offset == 0xffff))
   3711 		return 0;
   3712 
   3713 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3714 	/*
   3715 	 * Check whether alternative MAC address is valid or not.
   3716 	 * Some cards have non 0xffff pointer but those don't use
   3717 	 * alternative MAC address in reality.
   3718 	 *
   3719 	 * Check whether the broadcast bit is set or not.
   3720 	 */
   3721 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3722 		if (((myea[0] & 0xff) & 0x01) == 0)
   3723 			return offset; /* Found */
   3724 
   3725 	/* Not found */
   3726 	return 0;
   3727 }
   3728 
   3729 static int
   3730 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3731 {
   3732 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3733 	uint16_t offset = NVM_OFF_MACADDR;
   3734 	int do_invert = 0;
   3735 
   3736 	switch (sc->sc_type) {
   3737 	case WM_T_82580:
   3738 	case WM_T_I350:
   3739 	case WM_T_I354:
   3740 		/* EEPROM Top Level Partitioning */
   3741 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3742 		break;
   3743 	case WM_T_82571:
   3744 	case WM_T_82575:
   3745 	case WM_T_82576:
   3746 	case WM_T_80003:
   3747 	case WM_T_I210:
   3748 	case WM_T_I211:
   3749 		offset = wm_check_alt_mac_addr(sc);
   3750 		if (offset == 0)
   3751 			if ((sc->sc_funcid & 0x01) == 1)
   3752 				do_invert = 1;
   3753 		break;
   3754 	default:
   3755 		if ((sc->sc_funcid & 0x01) == 1)
   3756 			do_invert = 1;
   3757 		break;
   3758 	}
   3759 
   3760 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3761 		goto bad;
   3762 
   3763 	enaddr[0] = myea[0] & 0xff;
   3764 	enaddr[1] = myea[0] >> 8;
   3765 	enaddr[2] = myea[1] & 0xff;
   3766 	enaddr[3] = myea[1] >> 8;
   3767 	enaddr[4] = myea[2] & 0xff;
   3768 	enaddr[5] = myea[2] >> 8;
   3769 
   3770 	/*
   3771 	 * Toggle the LSB of the MAC address on the second port
   3772 	 * of some dual port cards.
   3773 	 */
   3774 	if (do_invert != 0)
   3775 		enaddr[5] ^= 1;
   3776 
   3777 	return 0;
   3778 
   3779  bad:
   3780 	return -1;
   3781 }
   3782 
   3783 /*
   3784  * wm_set_ral:
   3785  *
   3786  *	Set an entery in the receive address list.
   3787  */
   3788 static void
   3789 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3790 {
   3791 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3792 	uint32_t wlock_mac;
   3793 	int rv;
   3794 
   3795 	if (enaddr != NULL) {
   3796 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3797 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3798 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3799 		ral_hi |= RAL_AV;
   3800 	} else {
   3801 		ral_lo = 0;
   3802 		ral_hi = 0;
   3803 	}
   3804 
   3805 	switch (sc->sc_type) {
   3806 	case WM_T_82542_2_0:
   3807 	case WM_T_82542_2_1:
   3808 	case WM_T_82543:
   3809 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3810 		CSR_WRITE_FLUSH(sc);
   3811 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3812 		CSR_WRITE_FLUSH(sc);
   3813 		break;
   3814 	case WM_T_PCH2:
   3815 	case WM_T_PCH_LPT:
   3816 	case WM_T_PCH_SPT:
   3817 	case WM_T_PCH_CNP:
   3818 		if (idx == 0) {
   3819 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3820 			CSR_WRITE_FLUSH(sc);
   3821 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3822 			CSR_WRITE_FLUSH(sc);
   3823 			return;
   3824 		}
   3825 		if (sc->sc_type != WM_T_PCH2) {
   3826 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3827 			    FWSM_WLOCK_MAC);
   3828 			addrl = WMREG_SHRAL(idx - 1);
   3829 			addrh = WMREG_SHRAH(idx - 1);
   3830 		} else {
   3831 			wlock_mac = 0;
   3832 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3833 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3834 		}
   3835 
   3836 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3837 			rv = wm_get_swflag_ich8lan(sc);
   3838 			if (rv != 0)
   3839 				return;
   3840 			CSR_WRITE(sc, addrl, ral_lo);
   3841 			CSR_WRITE_FLUSH(sc);
   3842 			CSR_WRITE(sc, addrh, ral_hi);
   3843 			CSR_WRITE_FLUSH(sc);
   3844 			wm_put_swflag_ich8lan(sc);
   3845 		}
   3846 
   3847 		break;
   3848 	default:
   3849 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3850 		CSR_WRITE_FLUSH(sc);
   3851 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3852 		CSR_WRITE_FLUSH(sc);
   3853 		break;
   3854 	}
   3855 }
   3856 
   3857 /*
   3858  * wm_mchash:
   3859  *
   3860  *	Compute the hash of the multicast address for the 4096-bit
   3861  *	multicast filter.
   3862  */
   3863 static uint32_t
   3864 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3865 {
   3866 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3867 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3868 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3869 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3870 	uint32_t hash;
   3871 
   3872 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3873 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3874 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3875 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3876 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3877 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3878 		return (hash & 0x3ff);
   3879 	}
   3880 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3881 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3882 
   3883 	return (hash & 0xfff);
   3884 }
   3885 
   3886 /*
   3887  *
   3888  *
   3889  */
   3890 static int
   3891 wm_rar_count(struct wm_softc *sc)
   3892 {
   3893 	int size;
   3894 
   3895 	switch (sc->sc_type) {
   3896 	case WM_T_ICH8:
   3897 		size = WM_RAL_TABSIZE_ICH8 -1;
   3898 		break;
   3899 	case WM_T_ICH9:
   3900 	case WM_T_ICH10:
   3901 	case WM_T_PCH:
   3902 		size = WM_RAL_TABSIZE_ICH8;
   3903 		break;
   3904 	case WM_T_PCH2:
   3905 		size = WM_RAL_TABSIZE_PCH2;
   3906 		break;
   3907 	case WM_T_PCH_LPT:
   3908 	case WM_T_PCH_SPT:
   3909 	case WM_T_PCH_CNP:
   3910 		size = WM_RAL_TABSIZE_PCH_LPT;
   3911 		break;
   3912 	case WM_T_82575:
   3913 	case WM_T_I210:
   3914 	case WM_T_I211:
   3915 		size = WM_RAL_TABSIZE_82575;
   3916 		break;
   3917 	case WM_T_82576:
   3918 	case WM_T_82580:
   3919 		size = WM_RAL_TABSIZE_82576;
   3920 		break;
   3921 	case WM_T_I350:
   3922 	case WM_T_I354:
   3923 		size = WM_RAL_TABSIZE_I350;
   3924 		break;
   3925 	default:
   3926 		size = WM_RAL_TABSIZE;
   3927 	}
   3928 
   3929 	return size;
   3930 }
   3931 
   3932 /*
   3933  * wm_set_filter:
   3934  *
   3935  *	Set up the receive filter.
   3936  */
   3937 static void
   3938 wm_set_filter(struct wm_softc *sc)
   3939 {
   3940 	struct ethercom *ec = &sc->sc_ethercom;
   3941 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3942 	struct ether_multi *enm;
   3943 	struct ether_multistep step;
   3944 	bus_addr_t mta_reg;
   3945 	uint32_t hash, reg, bit;
   3946 	int i, size, ralmax, rv;
   3947 
   3948 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3949 		device_xname(sc->sc_dev), __func__));
   3950 
   3951 	if (sc->sc_type >= WM_T_82544)
   3952 		mta_reg = WMREG_CORDOVA_MTA;
   3953 	else
   3954 		mta_reg = WMREG_MTA;
   3955 
   3956 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3957 
   3958 	if (ifp->if_flags & IFF_BROADCAST)
   3959 		sc->sc_rctl |= RCTL_BAM;
   3960 	if (ifp->if_flags & IFF_PROMISC) {
   3961 		sc->sc_rctl |= RCTL_UPE;
   3962 		ETHER_LOCK(ec);
   3963 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3964 		ETHER_UNLOCK(ec);
   3965 		goto allmulti;
   3966 	}
   3967 
   3968 	/*
   3969 	 * Set the station address in the first RAL slot, and
   3970 	 * clear the remaining slots.
   3971 	 */
   3972 	size = wm_rar_count(sc);
   3973 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3974 
   3975 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3976 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3977 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3978 		switch (i) {
   3979 		case 0:
   3980 			/* We can use all entries */
   3981 			ralmax = size;
   3982 			break;
   3983 		case 1:
   3984 			/* Only RAR[0] */
   3985 			ralmax = 1;
   3986 			break;
   3987 		default:
   3988 			/* Available SHRA + RAR[0] */
   3989 			ralmax = i + 1;
   3990 		}
   3991 	} else
   3992 		ralmax = size;
   3993 	for (i = 1; i < size; i++) {
   3994 		if (i < ralmax)
   3995 			wm_set_ral(sc, NULL, i);
   3996 	}
   3997 
   3998 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3999 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4000 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4001 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4002 		size = WM_ICH8_MC_TABSIZE;
   4003 	else
   4004 		size = WM_MC_TABSIZE;
   4005 	/* Clear out the multicast table. */
   4006 	for (i = 0; i < size; i++) {
   4007 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4008 		CSR_WRITE_FLUSH(sc);
   4009 	}
   4010 
   4011 	ETHER_LOCK(ec);
   4012 	ETHER_FIRST_MULTI(step, ec, enm);
   4013 	while (enm != NULL) {
   4014 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4015 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4016 			ETHER_UNLOCK(ec);
   4017 			/*
   4018 			 * We must listen to a range of multicast addresses.
   4019 			 * For now, just accept all multicasts, rather than
   4020 			 * trying to set only those filter bits needed to match
   4021 			 * the range.  (At this time, the only use of address
   4022 			 * ranges is for IP multicast routing, for which the
   4023 			 * range is big enough to require all bits set.)
   4024 			 */
   4025 			goto allmulti;
   4026 		}
   4027 
   4028 		hash = wm_mchash(sc, enm->enm_addrlo);
   4029 
   4030 		reg = (hash >> 5);
   4031 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4032 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4033 		    || (sc->sc_type == WM_T_PCH2)
   4034 		    || (sc->sc_type == WM_T_PCH_LPT)
   4035 		    || (sc->sc_type == WM_T_PCH_SPT)
   4036 		    || (sc->sc_type == WM_T_PCH_CNP))
   4037 			reg &= 0x1f;
   4038 		else
   4039 			reg &= 0x7f;
   4040 		bit = hash & 0x1f;
   4041 
   4042 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4043 		hash |= 1U << bit;
   4044 
   4045 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4046 			/*
   4047 			 * 82544 Errata 9: Certain register cannot be written
   4048 			 * with particular alignments in PCI-X bus operation
   4049 			 * (FCAH, MTA and VFTA).
   4050 			 */
   4051 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4052 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4053 			CSR_WRITE_FLUSH(sc);
   4054 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4055 			CSR_WRITE_FLUSH(sc);
   4056 		} else {
   4057 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4058 			CSR_WRITE_FLUSH(sc);
   4059 		}
   4060 
   4061 		ETHER_NEXT_MULTI(step, enm);
   4062 	}
   4063 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4064 	ETHER_UNLOCK(ec);
   4065 
   4066 	goto setit;
   4067 
   4068  allmulti:
   4069 	sc->sc_rctl |= RCTL_MPE;
   4070 
   4071  setit:
   4072 	if (sc->sc_type >= WM_T_PCH2) {
   4073 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4074 		    && (ifp->if_mtu > ETHERMTU))
   4075 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4076 		else
   4077 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4078 		if (rv != 0)
   4079 			device_printf(sc->sc_dev,
   4080 			    "Failed to do workaround for jumbo frame.\n");
   4081 	}
   4082 
   4083 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4084 }
   4085 
   4086 /* Reset and init related */
   4087 
   4088 static void
   4089 wm_set_vlan(struct wm_softc *sc)
   4090 {
   4091 
   4092 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4093 		device_xname(sc->sc_dev), __func__));
   4094 
   4095 	/* Deal with VLAN enables. */
   4096 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4097 		sc->sc_ctrl |= CTRL_VME;
   4098 	else
   4099 		sc->sc_ctrl &= ~CTRL_VME;
   4100 
   4101 	/* Write the control registers. */
   4102 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4103 }
   4104 
   4105 static void
   4106 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4107 {
   4108 	uint32_t gcr;
   4109 	pcireg_t ctrl2;
   4110 
   4111 	gcr = CSR_READ(sc, WMREG_GCR);
   4112 
   4113 	/* Only take action if timeout value is defaulted to 0 */
   4114 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4115 		goto out;
   4116 
   4117 	if ((gcr & GCR_CAP_VER2) == 0) {
   4118 		gcr |= GCR_CMPL_TMOUT_10MS;
   4119 		goto out;
   4120 	}
   4121 
   4122 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4123 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4124 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4125 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4126 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4127 
   4128 out:
   4129 	/* Disable completion timeout resend */
   4130 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4131 
   4132 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4133 }
   4134 
   4135 void
   4136 wm_get_auto_rd_done(struct wm_softc *sc)
   4137 {
   4138 	int i;
   4139 
   4140 	/* wait for eeprom to reload */
   4141 	switch (sc->sc_type) {
   4142 	case WM_T_82571:
   4143 	case WM_T_82572:
   4144 	case WM_T_82573:
   4145 	case WM_T_82574:
   4146 	case WM_T_82583:
   4147 	case WM_T_82575:
   4148 	case WM_T_82576:
   4149 	case WM_T_82580:
   4150 	case WM_T_I350:
   4151 	case WM_T_I354:
   4152 	case WM_T_I210:
   4153 	case WM_T_I211:
   4154 	case WM_T_80003:
   4155 	case WM_T_ICH8:
   4156 	case WM_T_ICH9:
   4157 		for (i = 0; i < 10; i++) {
   4158 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4159 				break;
   4160 			delay(1000);
   4161 		}
   4162 		if (i == 10) {
   4163 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4164 			    "complete\n", device_xname(sc->sc_dev));
   4165 		}
   4166 		break;
   4167 	default:
   4168 		break;
   4169 	}
   4170 }
   4171 
   4172 void
   4173 wm_lan_init_done(struct wm_softc *sc)
   4174 {
   4175 	uint32_t reg = 0;
   4176 	int i;
   4177 
   4178 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4179 		device_xname(sc->sc_dev), __func__));
   4180 
   4181 	/* Wait for eeprom to reload */
   4182 	switch (sc->sc_type) {
   4183 	case WM_T_ICH10:
   4184 	case WM_T_PCH:
   4185 	case WM_T_PCH2:
   4186 	case WM_T_PCH_LPT:
   4187 	case WM_T_PCH_SPT:
   4188 	case WM_T_PCH_CNP:
   4189 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4190 			reg = CSR_READ(sc, WMREG_STATUS);
   4191 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4192 				break;
   4193 			delay(100);
   4194 		}
   4195 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4196 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4197 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4198 		}
   4199 		break;
   4200 	default:
   4201 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4202 		    __func__);
   4203 		break;
   4204 	}
   4205 
   4206 	reg &= ~STATUS_LAN_INIT_DONE;
   4207 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4208 }
   4209 
   4210 void
   4211 wm_get_cfg_done(struct wm_softc *sc)
   4212 {
   4213 	int mask;
   4214 	uint32_t reg;
   4215 	int i;
   4216 
   4217 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4218 		device_xname(sc->sc_dev), __func__));
   4219 
   4220 	/* Wait for eeprom to reload */
   4221 	switch (sc->sc_type) {
   4222 	case WM_T_82542_2_0:
   4223 	case WM_T_82542_2_1:
   4224 		/* null */
   4225 		break;
   4226 	case WM_T_82543:
   4227 	case WM_T_82544:
   4228 	case WM_T_82540:
   4229 	case WM_T_82545:
   4230 	case WM_T_82545_3:
   4231 	case WM_T_82546:
   4232 	case WM_T_82546_3:
   4233 	case WM_T_82541:
   4234 	case WM_T_82541_2:
   4235 	case WM_T_82547:
   4236 	case WM_T_82547_2:
   4237 	case WM_T_82573:
   4238 	case WM_T_82574:
   4239 	case WM_T_82583:
   4240 		/* generic */
   4241 		delay(10*1000);
   4242 		break;
   4243 	case WM_T_80003:
   4244 	case WM_T_82571:
   4245 	case WM_T_82572:
   4246 	case WM_T_82575:
   4247 	case WM_T_82576:
   4248 	case WM_T_82580:
   4249 	case WM_T_I350:
   4250 	case WM_T_I354:
   4251 	case WM_T_I210:
   4252 	case WM_T_I211:
   4253 		if (sc->sc_type == WM_T_82571) {
   4254 			/* Only 82571 shares port 0 */
   4255 			mask = EEMNGCTL_CFGDONE_0;
   4256 		} else
   4257 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4258 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4259 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4260 				break;
   4261 			delay(1000);
   4262 		}
   4263 		if (i >= WM_PHY_CFG_TIMEOUT)
   4264 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4265 				device_xname(sc->sc_dev), __func__));
   4266 		break;
   4267 	case WM_T_ICH8:
   4268 	case WM_T_ICH9:
   4269 	case WM_T_ICH10:
   4270 	case WM_T_PCH:
   4271 	case WM_T_PCH2:
   4272 	case WM_T_PCH_LPT:
   4273 	case WM_T_PCH_SPT:
   4274 	case WM_T_PCH_CNP:
   4275 		delay(10*1000);
   4276 		if (sc->sc_type >= WM_T_ICH10)
   4277 			wm_lan_init_done(sc);
   4278 		else
   4279 			wm_get_auto_rd_done(sc);
   4280 
   4281 		/* Clear PHY Reset Asserted bit */
   4282 		reg = CSR_READ(sc, WMREG_STATUS);
   4283 		if ((reg & STATUS_PHYRA) != 0)
   4284 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4285 		break;
   4286 	default:
   4287 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4288 		    __func__);
   4289 		break;
   4290 	}
   4291 }
   4292 
   4293 int
   4294 wm_phy_post_reset(struct wm_softc *sc)
   4295 {
   4296 	device_t dev = sc->sc_dev;
   4297 	uint16_t reg;
   4298 	int rv = 0;
   4299 
   4300 	/* This function is only for ICH8 and newer. */
   4301 	if (sc->sc_type < WM_T_ICH8)
   4302 		return 0;
   4303 
   4304 	if (wm_phy_resetisblocked(sc)) {
   4305 		/* XXX */
   4306 		device_printf(dev, "PHY is blocked\n");
   4307 		return -1;
   4308 	}
   4309 
   4310 	/* Allow time for h/w to get to quiescent state after reset */
   4311 	delay(10*1000);
   4312 
   4313 	/* Perform any necessary post-reset workarounds */
   4314 	if (sc->sc_type == WM_T_PCH)
   4315 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4316 	else if (sc->sc_type == WM_T_PCH2)
   4317 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4318 	if (rv != 0)
   4319 		return rv;
   4320 
   4321 	/* Clear the host wakeup bit after lcd reset */
   4322 	if (sc->sc_type >= WM_T_PCH) {
   4323 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4324 		reg &= ~BM_WUC_HOST_WU_BIT;
   4325 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4326 	}
   4327 
   4328 	/* Configure the LCD with the extended configuration region in NVM */
   4329 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4330 		return rv;
   4331 
   4332 	/* Configure the LCD with the OEM bits in NVM */
   4333 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4334 
   4335 	if (sc->sc_type == WM_T_PCH2) {
   4336 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4337 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4338 			delay(10 * 1000);
   4339 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4340 		}
   4341 		/* Set EEE LPI Update Timer to 200usec */
   4342 		rv = sc->phy.acquire(sc);
   4343 		if (rv)
   4344 			return rv;
   4345 		rv = wm_write_emi_reg_locked(dev,
   4346 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4347 		sc->phy.release(sc);
   4348 	}
   4349 
   4350 	return rv;
   4351 }
   4352 
   4353 /* Only for PCH and newer */
   4354 static int
   4355 wm_write_smbus_addr(struct wm_softc *sc)
   4356 {
   4357 	uint32_t strap, freq;
   4358 	uint16_t phy_data;
   4359 	int rv;
   4360 
   4361 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4362 		device_xname(sc->sc_dev), __func__));
   4363 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4364 
   4365 	strap = CSR_READ(sc, WMREG_STRAP);
   4366 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4367 
   4368 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4369 	if (rv != 0)
   4370 		return -1;
   4371 
   4372 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4373 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4374 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4375 
   4376 	if (sc->sc_phytype == WMPHY_I217) {
   4377 		/* Restore SMBus frequency */
   4378 		if (freq --) {
   4379 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4380 			    | HV_SMB_ADDR_FREQ_HIGH);
   4381 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4382 			    HV_SMB_ADDR_FREQ_LOW);
   4383 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4384 			    HV_SMB_ADDR_FREQ_HIGH);
   4385 		} else
   4386 			DPRINTF(sc, WM_DEBUG_INIT,
   4387 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4388 				device_xname(sc->sc_dev), __func__));
   4389 	}
   4390 
   4391 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4392 	    phy_data);
   4393 }
   4394 
   4395 static int
   4396 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4397 {
   4398 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4399 	uint16_t phy_page = 0;
   4400 	int rv = 0;
   4401 
   4402 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4403 		device_xname(sc->sc_dev), __func__));
   4404 
   4405 	switch (sc->sc_type) {
   4406 	case WM_T_ICH8:
   4407 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4408 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4409 			return 0;
   4410 
   4411 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4412 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4413 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4414 			break;
   4415 		}
   4416 		/* FALLTHROUGH */
   4417 	case WM_T_PCH:
   4418 	case WM_T_PCH2:
   4419 	case WM_T_PCH_LPT:
   4420 	case WM_T_PCH_SPT:
   4421 	case WM_T_PCH_CNP:
   4422 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4423 		break;
   4424 	default:
   4425 		return 0;
   4426 	}
   4427 
   4428 	if ((rv = sc->phy.acquire(sc)) != 0)
   4429 		return rv;
   4430 
   4431 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4432 	if ((reg & sw_cfg_mask) == 0)
   4433 		goto release;
   4434 
   4435 	/*
   4436 	 * Make sure HW does not configure LCD from PHY extended configuration
   4437 	 * before SW configuration
   4438 	 */
   4439 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4440 	if ((sc->sc_type < WM_T_PCH2)
   4441 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4442 		goto release;
   4443 
   4444 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4445 		device_xname(sc->sc_dev), __func__));
   4446 	/* word_addr is in DWORD */
   4447 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4448 
   4449 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4450 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4451 	if (cnf_size == 0)
   4452 		goto release;
   4453 
   4454 	if (((sc->sc_type == WM_T_PCH)
   4455 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4456 	    || (sc->sc_type > WM_T_PCH)) {
   4457 		/*
   4458 		 * HW configures the SMBus address and LEDs when the OEM and
   4459 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4460 		 * are cleared, SW will configure them instead.
   4461 		 */
   4462 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4463 			device_xname(sc->sc_dev), __func__));
   4464 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4465 			goto release;
   4466 
   4467 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4468 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4469 		    (uint16_t)reg);
   4470 		if (rv != 0)
   4471 			goto release;
   4472 	}
   4473 
   4474 	/* Configure LCD from extended configuration region. */
   4475 	for (i = 0; i < cnf_size; i++) {
   4476 		uint16_t reg_data, reg_addr;
   4477 
   4478 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4479 			goto release;
   4480 
   4481 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4482 			goto release;
   4483 
   4484 		if (reg_addr == IGPHY_PAGE_SELECT)
   4485 			phy_page = reg_data;
   4486 
   4487 		reg_addr &= IGPHY_MAXREGADDR;
   4488 		reg_addr |= phy_page;
   4489 
   4490 		KASSERT(sc->phy.writereg_locked != NULL);
   4491 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4492 		    reg_data);
   4493 	}
   4494 
   4495 release:
   4496 	sc->phy.release(sc);
   4497 	return rv;
   4498 }
   4499 
   4500 /*
   4501  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4502  *  @sc:       pointer to the HW structure
   4503  *  @d0_state: boolean if entering d0 or d3 device state
   4504  *
   4505  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4506  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4507  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4508  */
   4509 int
   4510 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4511 {
   4512 	uint32_t mac_reg;
   4513 	uint16_t oem_reg;
   4514 	int rv;
   4515 
   4516 	if (sc->sc_type < WM_T_PCH)
   4517 		return 0;
   4518 
   4519 	rv = sc->phy.acquire(sc);
   4520 	if (rv != 0)
   4521 		return rv;
   4522 
   4523 	if (sc->sc_type == WM_T_PCH) {
   4524 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4525 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4526 			goto release;
   4527 	}
   4528 
   4529 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4530 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4531 		goto release;
   4532 
   4533 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4534 
   4535 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4536 	if (rv != 0)
   4537 		goto release;
   4538 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4539 
   4540 	if (d0_state) {
   4541 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4542 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4543 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4544 			oem_reg |= HV_OEM_BITS_LPLU;
   4545 	} else {
   4546 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4547 		    != 0)
   4548 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4549 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4550 		    != 0)
   4551 			oem_reg |= HV_OEM_BITS_LPLU;
   4552 	}
   4553 
   4554 	/* Set Restart auto-neg to activate the bits */
   4555 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4556 	    && (wm_phy_resetisblocked(sc) == false))
   4557 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4558 
   4559 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4560 
   4561 release:
   4562 	sc->phy.release(sc);
   4563 
   4564 	return rv;
   4565 }
   4566 
   4567 /* Init hardware bits */
   4568 void
   4569 wm_initialize_hardware_bits(struct wm_softc *sc)
   4570 {
   4571 	uint32_t tarc0, tarc1, reg;
   4572 
   4573 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4574 		device_xname(sc->sc_dev), __func__));
   4575 
   4576 	/* For 82571 variant, 80003 and ICHs */
   4577 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4578 	    || (sc->sc_type >= WM_T_80003)) {
   4579 
   4580 		/* Transmit Descriptor Control 0 */
   4581 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4582 		reg |= TXDCTL_COUNT_DESC;
   4583 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4584 
   4585 		/* Transmit Descriptor Control 1 */
   4586 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4587 		reg |= TXDCTL_COUNT_DESC;
   4588 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4589 
   4590 		/* TARC0 */
   4591 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4592 		switch (sc->sc_type) {
   4593 		case WM_T_82571:
   4594 		case WM_T_82572:
   4595 		case WM_T_82573:
   4596 		case WM_T_82574:
   4597 		case WM_T_82583:
   4598 		case WM_T_80003:
   4599 			/* Clear bits 30..27 */
   4600 			tarc0 &= ~__BITS(30, 27);
   4601 			break;
   4602 		default:
   4603 			break;
   4604 		}
   4605 
   4606 		switch (sc->sc_type) {
   4607 		case WM_T_82571:
   4608 		case WM_T_82572:
   4609 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4610 
   4611 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4612 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4613 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4614 			/* 8257[12] Errata No.7 */
   4615 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4616 
   4617 			/* TARC1 bit 28 */
   4618 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4619 				tarc1 &= ~__BIT(28);
   4620 			else
   4621 				tarc1 |= __BIT(28);
   4622 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4623 
   4624 			/*
   4625 			 * 8257[12] Errata No.13
   4626 			 * Disable Dyamic Clock Gating.
   4627 			 */
   4628 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4629 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4630 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4631 			break;
   4632 		case WM_T_82573:
   4633 		case WM_T_82574:
   4634 		case WM_T_82583:
   4635 			if ((sc->sc_type == WM_T_82574)
   4636 			    || (sc->sc_type == WM_T_82583))
   4637 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4638 
   4639 			/* Extended Device Control */
   4640 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4641 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4642 			reg |= __BIT(22);	/* Set bit 22 */
   4643 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4644 
   4645 			/* Device Control */
   4646 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4647 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4648 
   4649 			/* PCIe Control Register */
   4650 			/*
   4651 			 * 82573 Errata (unknown).
   4652 			 *
   4653 			 * 82574 Errata 25 and 82583 Errata 12
   4654 			 * "Dropped Rx Packets":
   4655 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   4656 			 */
   4657 			reg = CSR_READ(sc, WMREG_GCR);
   4658 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4659 			CSR_WRITE(sc, WMREG_GCR, reg);
   4660 
   4661 			if ((sc->sc_type == WM_T_82574)
   4662 			    || (sc->sc_type == WM_T_82583)) {
   4663 				/*
   4664 				 * Document says this bit must be set for
   4665 				 * proper operation.
   4666 				 */
   4667 				reg = CSR_READ(sc, WMREG_GCR);
   4668 				reg |= __BIT(22);
   4669 				CSR_WRITE(sc, WMREG_GCR, reg);
   4670 
   4671 				/*
   4672 				 * Apply workaround for hardware errata
   4673 				 * documented in errata docs Fixes issue where
   4674 				 * some error prone or unreliable PCIe
   4675 				 * completions are occurring, particularly
   4676 				 * with ASPM enabled. Without fix, issue can
   4677 				 * cause Tx timeouts.
   4678 				 */
   4679 				reg = CSR_READ(sc, WMREG_GCR2);
   4680 				reg |= __BIT(0);
   4681 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4682 			}
   4683 			break;
   4684 		case WM_T_80003:
   4685 			/* TARC0 */
   4686 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4687 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4688 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4689 
   4690 			/* TARC1 bit 28 */
   4691 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4692 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4693 				tarc1 &= ~__BIT(28);
   4694 			else
   4695 				tarc1 |= __BIT(28);
   4696 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4697 			break;
   4698 		case WM_T_ICH8:
   4699 		case WM_T_ICH9:
   4700 		case WM_T_ICH10:
   4701 		case WM_T_PCH:
   4702 		case WM_T_PCH2:
   4703 		case WM_T_PCH_LPT:
   4704 		case WM_T_PCH_SPT:
   4705 		case WM_T_PCH_CNP:
   4706 			/* TARC0 */
   4707 			if (sc->sc_type == WM_T_ICH8) {
   4708 				/* Set TARC0 bits 29 and 28 */
   4709 				tarc0 |= __BITS(29, 28);
   4710 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4711 				tarc0 |= __BIT(29);
   4712 				/*
   4713 				 *  Drop bit 28. From Linux.
   4714 				 * See I218/I219 spec update
   4715 				 * "5. Buffer Overrun While the I219 is
   4716 				 * Processing DMA Transactions"
   4717 				 */
   4718 				tarc0 &= ~__BIT(28);
   4719 			}
   4720 			/* Set TARC0 bits 23,24,26,27 */
   4721 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4722 
   4723 			/* CTRL_EXT */
   4724 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4725 			reg |= __BIT(22);	/* Set bit 22 */
   4726 			/*
   4727 			 * Enable PHY low-power state when MAC is at D3
   4728 			 * w/o WoL
   4729 			 */
   4730 			if (sc->sc_type >= WM_T_PCH)
   4731 				reg |= CTRL_EXT_PHYPDEN;
   4732 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4733 
   4734 			/* TARC1 */
   4735 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4736 			/* bit 28 */
   4737 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4738 				tarc1 &= ~__BIT(28);
   4739 			else
   4740 				tarc1 |= __BIT(28);
   4741 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4742 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4743 
   4744 			/* Device Status */
   4745 			if (sc->sc_type == WM_T_ICH8) {
   4746 				reg = CSR_READ(sc, WMREG_STATUS);
   4747 				reg &= ~__BIT(31);
   4748 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4749 
   4750 			}
   4751 
   4752 			/* IOSFPC */
   4753 			if (sc->sc_type == WM_T_PCH_SPT) {
   4754 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4755 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4756 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4757 			}
   4758 			/*
   4759 			 * Work-around descriptor data corruption issue during
   4760 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4761 			 * capability.
   4762 			 */
   4763 			reg = CSR_READ(sc, WMREG_RFCTL);
   4764 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4765 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4766 			break;
   4767 		default:
   4768 			break;
   4769 		}
   4770 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4771 
   4772 		switch (sc->sc_type) {
   4773 		/*
   4774 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4775 		 * Avoid RSS Hash Value bug.
   4776 		 */
   4777 		case WM_T_82571:
   4778 		case WM_T_82572:
   4779 		case WM_T_82573:
   4780 		case WM_T_80003:
   4781 		case WM_T_ICH8:
   4782 			reg = CSR_READ(sc, WMREG_RFCTL);
   4783 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4784 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4785 			break;
   4786 		case WM_T_82574:
   4787 			/* Use extened Rx descriptor. */
   4788 			reg = CSR_READ(sc, WMREG_RFCTL);
   4789 			reg |= WMREG_RFCTL_EXSTEN;
   4790 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4791 			break;
   4792 		default:
   4793 			break;
   4794 		}
   4795 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4796 		/*
   4797 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4798 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4799 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4800 		 * Correctly by the Device"
   4801 		 *
   4802 		 * I354(C2000) Errata AVR53:
   4803 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4804 		 * Hang"
   4805 		 */
   4806 		reg = CSR_READ(sc, WMREG_RFCTL);
   4807 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4808 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4809 	}
   4810 }
   4811 
   4812 static uint32_t
   4813 wm_rxpbs_adjust_82580(uint32_t val)
   4814 {
   4815 	uint32_t rv = 0;
   4816 
   4817 	if (val < __arraycount(wm_82580_rxpbs_table))
   4818 		rv = wm_82580_rxpbs_table[val];
   4819 
   4820 	return rv;
   4821 }
   4822 
   4823 /*
   4824  * wm_reset_phy:
   4825  *
   4826  *	generic PHY reset function.
   4827  *	Same as e1000_phy_hw_reset_generic()
   4828  */
   4829 static int
   4830 wm_reset_phy(struct wm_softc *sc)
   4831 {
   4832 	uint32_t reg;
   4833 
   4834 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4835 		device_xname(sc->sc_dev), __func__));
   4836 	if (wm_phy_resetisblocked(sc))
   4837 		return -1;
   4838 
   4839 	sc->phy.acquire(sc);
   4840 
   4841 	reg = CSR_READ(sc, WMREG_CTRL);
   4842 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4843 	CSR_WRITE_FLUSH(sc);
   4844 
   4845 	delay(sc->phy.reset_delay_us);
   4846 
   4847 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4848 	CSR_WRITE_FLUSH(sc);
   4849 
   4850 	delay(150);
   4851 
   4852 	sc->phy.release(sc);
   4853 
   4854 	wm_get_cfg_done(sc);
   4855 	wm_phy_post_reset(sc);
   4856 
   4857 	return 0;
   4858 }
   4859 
   4860 /*
   4861  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   4862  *
   4863  * In i219, the descriptor rings must be emptied before resetting the HW
   4864  * or before changing the device state to D3 during runtime (runtime PM).
   4865  *
   4866  * Failure to do this will cause the HW to enter a unit hang state which can
   4867  * only be released by PCI reset on the device.
   4868  *
   4869  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   4870  */
   4871 static void
   4872 wm_flush_desc_rings(struct wm_softc *sc)
   4873 {
   4874 	pcireg_t preg;
   4875 	uint32_t reg;
   4876 	struct wm_txqueue *txq;
   4877 	wiseman_txdesc_t *txd;
   4878 	int nexttx;
   4879 	uint32_t rctl;
   4880 
   4881 	/* First, disable MULR fix in FEXTNVM11 */
   4882 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4883 	reg |= FEXTNVM11_DIS_MULRFIX;
   4884 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4885 
   4886 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4887 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4888 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4889 		return;
   4890 
   4891 	/*
   4892 	 * Remove all descriptors from the tx_ring.
   4893 	 *
   4894 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   4895 	 * happens when the HW reads the regs. We  assign the ring itself as
   4896 	 * the data of the next descriptor. We don't care about the data we are
   4897 	 * about to reset the HW.
   4898 	 */
   4899 #ifdef WM_DEBUG
   4900 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   4901 #endif
   4902 	reg = CSR_READ(sc, WMREG_TCTL);
   4903 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4904 
   4905 	txq = &sc->sc_queue[0].wmq_txq;
   4906 	nexttx = txq->txq_next;
   4907 	txd = &txq->txq_descs[nexttx];
   4908 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   4909 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4910 	txd->wtx_fields.wtxu_status = 0;
   4911 	txd->wtx_fields.wtxu_options = 0;
   4912 	txd->wtx_fields.wtxu_vlan = 0;
   4913 
   4914 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4915 	    BUS_SPACE_BARRIER_WRITE);
   4916 
   4917 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4918 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4919 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4920 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4921 	delay(250);
   4922 
   4923 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4924 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4925 		return;
   4926 
   4927 	/*
   4928 	 * Mark all descriptors in the RX ring as consumed and disable the
   4929 	 * rx ring.
   4930 	 */
   4931 #ifdef WM_DEBUG
   4932 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4933 #endif
   4934 	rctl = CSR_READ(sc, WMREG_RCTL);
   4935 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4936 	CSR_WRITE_FLUSH(sc);
   4937 	delay(150);
   4938 
   4939 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4940 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4941 	reg &= 0xffffc000;
   4942 	/*
   4943 	 * Update thresholds: prefetch threshold to 31, host threshold
   4944 	 * to 1 and make sure the granularity is "descriptors" and not
   4945 	 * "cache lines"
   4946 	 */
   4947 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4948 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4949 
   4950 	/* Momentarily enable the RX ring for the changes to take effect */
   4951 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4952 	CSR_WRITE_FLUSH(sc);
   4953 	delay(150);
   4954 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4955 }
   4956 
   4957 /*
   4958  * wm_reset:
   4959  *
   4960  *	Reset the i82542 chip.
   4961  */
   4962 static void
   4963 wm_reset(struct wm_softc *sc)
   4964 {
   4965 	int phy_reset = 0;
   4966 	int i, error = 0;
   4967 	uint32_t reg;
   4968 	uint16_t kmreg;
   4969 	int rv;
   4970 
   4971 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4972 		device_xname(sc->sc_dev), __func__));
   4973 	KASSERT(sc->sc_type != 0);
   4974 
   4975 	/*
   4976 	 * Allocate on-chip memory according to the MTU size.
   4977 	 * The Packet Buffer Allocation register must be written
   4978 	 * before the chip is reset.
   4979 	 */
   4980 	switch (sc->sc_type) {
   4981 	case WM_T_82547:
   4982 	case WM_T_82547_2:
   4983 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4984 		    PBA_22K : PBA_30K;
   4985 		for (i = 0; i < sc->sc_nqueues; i++) {
   4986 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4987 			txq->txq_fifo_head = 0;
   4988 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4989 			txq->txq_fifo_size =
   4990 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4991 			txq->txq_fifo_stall = 0;
   4992 		}
   4993 		break;
   4994 	case WM_T_82571:
   4995 	case WM_T_82572:
   4996 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4997 	case WM_T_80003:
   4998 		sc->sc_pba = PBA_32K;
   4999 		break;
   5000 	case WM_T_82573:
   5001 		sc->sc_pba = PBA_12K;
   5002 		break;
   5003 	case WM_T_82574:
   5004 	case WM_T_82583:
   5005 		sc->sc_pba = PBA_20K;
   5006 		break;
   5007 	case WM_T_82576:
   5008 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5009 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5010 		break;
   5011 	case WM_T_82580:
   5012 	case WM_T_I350:
   5013 	case WM_T_I354:
   5014 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5015 		break;
   5016 	case WM_T_I210:
   5017 	case WM_T_I211:
   5018 		sc->sc_pba = PBA_34K;
   5019 		break;
   5020 	case WM_T_ICH8:
   5021 		/* Workaround for a bit corruption issue in FIFO memory */
   5022 		sc->sc_pba = PBA_8K;
   5023 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5024 		break;
   5025 	case WM_T_ICH9:
   5026 	case WM_T_ICH10:
   5027 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5028 		    PBA_14K : PBA_10K;
   5029 		break;
   5030 	case WM_T_PCH:
   5031 	case WM_T_PCH2:	/* XXX 14K? */
   5032 	case WM_T_PCH_LPT:
   5033 	case WM_T_PCH_SPT:
   5034 	case WM_T_PCH_CNP:
   5035 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5036 		    PBA_12K : PBA_26K;
   5037 		break;
   5038 	default:
   5039 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5040 		    PBA_40K : PBA_48K;
   5041 		break;
   5042 	}
   5043 	/*
   5044 	 * Only old or non-multiqueue devices have the PBA register
   5045 	 * XXX Need special handling for 82575.
   5046 	 */
   5047 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5048 	    || (sc->sc_type == WM_T_82575))
   5049 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5050 
   5051 	/* Prevent the PCI-E bus from sticking */
   5052 	if (sc->sc_flags & WM_F_PCIE) {
   5053 		int timeout = 800;
   5054 
   5055 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5056 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5057 
   5058 		while (timeout--) {
   5059 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5060 			    == 0)
   5061 				break;
   5062 			delay(100);
   5063 		}
   5064 		if (timeout == 0)
   5065 			device_printf(sc->sc_dev,
   5066 			    "failed to disable busmastering\n");
   5067 	}
   5068 
   5069 	/* Set the completion timeout for interface */
   5070 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5071 	    || (sc->sc_type == WM_T_82580)
   5072 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5073 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5074 		wm_set_pcie_completion_timeout(sc);
   5075 
   5076 	/* Clear interrupt */
   5077 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5078 	if (wm_is_using_msix(sc)) {
   5079 		if (sc->sc_type != WM_T_82574) {
   5080 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5081 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5082 		} else
   5083 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5084 	}
   5085 
   5086 	/* Stop the transmit and receive processes. */
   5087 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5088 	sc->sc_rctl &= ~RCTL_EN;
   5089 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5090 	CSR_WRITE_FLUSH(sc);
   5091 
   5092 	/* XXX set_tbi_sbp_82543() */
   5093 
   5094 	delay(10*1000);
   5095 
   5096 	/* Must acquire the MDIO ownership before MAC reset */
   5097 	switch (sc->sc_type) {
   5098 	case WM_T_82573:
   5099 	case WM_T_82574:
   5100 	case WM_T_82583:
   5101 		error = wm_get_hw_semaphore_82573(sc);
   5102 		break;
   5103 	default:
   5104 		break;
   5105 	}
   5106 
   5107 	/*
   5108 	 * 82541 Errata 29? & 82547 Errata 28?
   5109 	 * See also the description about PHY_RST bit in CTRL register
   5110 	 * in 8254x_GBe_SDM.pdf.
   5111 	 */
   5112 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5113 		CSR_WRITE(sc, WMREG_CTRL,
   5114 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5115 		CSR_WRITE_FLUSH(sc);
   5116 		delay(5000);
   5117 	}
   5118 
   5119 	switch (sc->sc_type) {
   5120 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5121 	case WM_T_82541:
   5122 	case WM_T_82541_2:
   5123 	case WM_T_82547:
   5124 	case WM_T_82547_2:
   5125 		/*
   5126 		 * On some chipsets, a reset through a memory-mapped write
   5127 		 * cycle can cause the chip to reset before completing the
   5128 		 * write cycle. This causes major headache that can be avoided
   5129 		 * by issuing the reset via indirect register writes through
   5130 		 * I/O space.
   5131 		 *
   5132 		 * So, if we successfully mapped the I/O BAR at attach time,
   5133 		 * use that. Otherwise, try our luck with a memory-mapped
   5134 		 * reset.
   5135 		 */
   5136 		if (sc->sc_flags & WM_F_IOH_VALID)
   5137 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5138 		else
   5139 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5140 		break;
   5141 	case WM_T_82545_3:
   5142 	case WM_T_82546_3:
   5143 		/* Use the shadow control register on these chips. */
   5144 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5145 		break;
   5146 	case WM_T_80003:
   5147 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5148 		sc->phy.acquire(sc);
   5149 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5150 		sc->phy.release(sc);
   5151 		break;
   5152 	case WM_T_ICH8:
   5153 	case WM_T_ICH9:
   5154 	case WM_T_ICH10:
   5155 	case WM_T_PCH:
   5156 	case WM_T_PCH2:
   5157 	case WM_T_PCH_LPT:
   5158 	case WM_T_PCH_SPT:
   5159 	case WM_T_PCH_CNP:
   5160 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5161 		if (wm_phy_resetisblocked(sc) == false) {
   5162 			/*
   5163 			 * Gate automatic PHY configuration by hardware on
   5164 			 * non-managed 82579
   5165 			 */
   5166 			if ((sc->sc_type == WM_T_PCH2)
   5167 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5168 				== 0))
   5169 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5170 
   5171 			reg |= CTRL_PHY_RESET;
   5172 			phy_reset = 1;
   5173 		} else
   5174 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5175 		sc->phy.acquire(sc);
   5176 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5177 		/* Don't insert a completion barrier when reset */
   5178 		delay(20*1000);
   5179 		mutex_exit(sc->sc_ich_phymtx);
   5180 		break;
   5181 	case WM_T_82580:
   5182 	case WM_T_I350:
   5183 	case WM_T_I354:
   5184 	case WM_T_I210:
   5185 	case WM_T_I211:
   5186 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5187 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5188 			CSR_WRITE_FLUSH(sc);
   5189 		delay(5000);
   5190 		break;
   5191 	case WM_T_82542_2_0:
   5192 	case WM_T_82542_2_1:
   5193 	case WM_T_82543:
   5194 	case WM_T_82540:
   5195 	case WM_T_82545:
   5196 	case WM_T_82546:
   5197 	case WM_T_82571:
   5198 	case WM_T_82572:
   5199 	case WM_T_82573:
   5200 	case WM_T_82574:
   5201 	case WM_T_82575:
   5202 	case WM_T_82576:
   5203 	case WM_T_82583:
   5204 	default:
   5205 		/* Everything else can safely use the documented method. */
   5206 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5207 		break;
   5208 	}
   5209 
   5210 	/* Must release the MDIO ownership after MAC reset */
   5211 	switch (sc->sc_type) {
   5212 	case WM_T_82573:
   5213 	case WM_T_82574:
   5214 	case WM_T_82583:
   5215 		if (error == 0)
   5216 			wm_put_hw_semaphore_82573(sc);
   5217 		break;
   5218 	default:
   5219 		break;
   5220 	}
   5221 
   5222 	/* Set Phy Config Counter to 50msec */
   5223 	if (sc->sc_type == WM_T_PCH2) {
   5224 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5225 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5226 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5227 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5228 	}
   5229 
   5230 	if (phy_reset != 0)
   5231 		wm_get_cfg_done(sc);
   5232 
   5233 	/* Reload EEPROM */
   5234 	switch (sc->sc_type) {
   5235 	case WM_T_82542_2_0:
   5236 	case WM_T_82542_2_1:
   5237 	case WM_T_82543:
   5238 	case WM_T_82544:
   5239 		delay(10);
   5240 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5241 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5242 		CSR_WRITE_FLUSH(sc);
   5243 		delay(2000);
   5244 		break;
   5245 	case WM_T_82540:
   5246 	case WM_T_82545:
   5247 	case WM_T_82545_3:
   5248 	case WM_T_82546:
   5249 	case WM_T_82546_3:
   5250 		delay(5*1000);
   5251 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5252 		break;
   5253 	case WM_T_82541:
   5254 	case WM_T_82541_2:
   5255 	case WM_T_82547:
   5256 	case WM_T_82547_2:
   5257 		delay(20000);
   5258 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5259 		break;
   5260 	case WM_T_82571:
   5261 	case WM_T_82572:
   5262 	case WM_T_82573:
   5263 	case WM_T_82574:
   5264 	case WM_T_82583:
   5265 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5266 			delay(10);
   5267 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5268 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5269 			CSR_WRITE_FLUSH(sc);
   5270 		}
   5271 		/* check EECD_EE_AUTORD */
   5272 		wm_get_auto_rd_done(sc);
   5273 		/*
   5274 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5275 		 * is set.
   5276 		 */
   5277 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5278 		    || (sc->sc_type == WM_T_82583))
   5279 			delay(25*1000);
   5280 		break;
   5281 	case WM_T_82575:
   5282 	case WM_T_82576:
   5283 	case WM_T_82580:
   5284 	case WM_T_I350:
   5285 	case WM_T_I354:
   5286 	case WM_T_I210:
   5287 	case WM_T_I211:
   5288 	case WM_T_80003:
   5289 		/* check EECD_EE_AUTORD */
   5290 		wm_get_auto_rd_done(sc);
   5291 		break;
   5292 	case WM_T_ICH8:
   5293 	case WM_T_ICH9:
   5294 	case WM_T_ICH10:
   5295 	case WM_T_PCH:
   5296 	case WM_T_PCH2:
   5297 	case WM_T_PCH_LPT:
   5298 	case WM_T_PCH_SPT:
   5299 	case WM_T_PCH_CNP:
   5300 		break;
   5301 	default:
   5302 		panic("%s: unknown type\n", __func__);
   5303 	}
   5304 
   5305 	/* Check whether EEPROM is present or not */
   5306 	switch (sc->sc_type) {
   5307 	case WM_T_82575:
   5308 	case WM_T_82576:
   5309 	case WM_T_82580:
   5310 	case WM_T_I350:
   5311 	case WM_T_I354:
   5312 	case WM_T_ICH8:
   5313 	case WM_T_ICH9:
   5314 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5315 			/* Not found */
   5316 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5317 			if (sc->sc_type == WM_T_82575)
   5318 				wm_reset_init_script_82575(sc);
   5319 		}
   5320 		break;
   5321 	default:
   5322 		break;
   5323 	}
   5324 
   5325 	if (phy_reset != 0)
   5326 		wm_phy_post_reset(sc);
   5327 
   5328 	if ((sc->sc_type == WM_T_82580)
   5329 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5330 		/* Clear global device reset status bit */
   5331 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5332 	}
   5333 
   5334 	/* Clear any pending interrupt events. */
   5335 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5336 	reg = CSR_READ(sc, WMREG_ICR);
   5337 	if (wm_is_using_msix(sc)) {
   5338 		if (sc->sc_type != WM_T_82574) {
   5339 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5340 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5341 		} else
   5342 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5343 	}
   5344 
   5345 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5346 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5347 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5348 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5349 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5350 		reg |= KABGTXD_BGSQLBIAS;
   5351 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5352 	}
   5353 
   5354 	/* Reload sc_ctrl */
   5355 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5356 
   5357 	wm_set_eee(sc);
   5358 
   5359 	/*
   5360 	 * For PCH, this write will make sure that any noise will be detected
   5361 	 * as a CRC error and be dropped rather than show up as a bad packet
   5362 	 * to the DMA engine
   5363 	 */
   5364 	if (sc->sc_type == WM_T_PCH)
   5365 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5366 
   5367 	if (sc->sc_type >= WM_T_82544)
   5368 		CSR_WRITE(sc, WMREG_WUC, 0);
   5369 
   5370 	if (sc->sc_type < WM_T_82575)
   5371 		wm_disable_aspm(sc); /* Workaround for some chips */
   5372 
   5373 	wm_reset_mdicnfg_82580(sc);
   5374 
   5375 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5376 		wm_pll_workaround_i210(sc);
   5377 
   5378 	if (sc->sc_type == WM_T_80003) {
   5379 		/* Default to TRUE to enable the MDIC W/A */
   5380 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5381 
   5382 		rv = wm_kmrn_readreg(sc,
   5383 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5384 		if (rv == 0) {
   5385 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5386 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5387 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5388 			else
   5389 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5390 		}
   5391 	}
   5392 }
   5393 
   5394 /*
   5395  * wm_add_rxbuf:
   5396  *
   5397  *	Add a receive buffer to the indiciated descriptor.
   5398  */
   5399 static int
   5400 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5401 {
   5402 	struct wm_softc *sc = rxq->rxq_sc;
   5403 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5404 	struct mbuf *m;
   5405 	int error;
   5406 
   5407 	KASSERT(mutex_owned(rxq->rxq_lock));
   5408 
   5409 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5410 	if (m == NULL)
   5411 		return ENOBUFS;
   5412 
   5413 	MCLGET(m, M_DONTWAIT);
   5414 	if ((m->m_flags & M_EXT) == 0) {
   5415 		m_freem(m);
   5416 		return ENOBUFS;
   5417 	}
   5418 
   5419 	if (rxs->rxs_mbuf != NULL)
   5420 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5421 
   5422 	rxs->rxs_mbuf = m;
   5423 
   5424 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5425 	/*
   5426 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5427 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5428 	 */
   5429 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5430 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5431 	if (error) {
   5432 		/* XXX XXX XXX */
   5433 		aprint_error_dev(sc->sc_dev,
   5434 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5435 		panic("wm_add_rxbuf");
   5436 	}
   5437 
   5438 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5439 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5440 
   5441 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5442 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5443 			wm_init_rxdesc(rxq, idx);
   5444 	} else
   5445 		wm_init_rxdesc(rxq, idx);
   5446 
   5447 	return 0;
   5448 }
   5449 
   5450 /*
   5451  * wm_rxdrain:
   5452  *
   5453  *	Drain the receive queue.
   5454  */
   5455 static void
   5456 wm_rxdrain(struct wm_rxqueue *rxq)
   5457 {
   5458 	struct wm_softc *sc = rxq->rxq_sc;
   5459 	struct wm_rxsoft *rxs;
   5460 	int i;
   5461 
   5462 	KASSERT(mutex_owned(rxq->rxq_lock));
   5463 
   5464 	for (i = 0; i < WM_NRXDESC; i++) {
   5465 		rxs = &rxq->rxq_soft[i];
   5466 		if (rxs->rxs_mbuf != NULL) {
   5467 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5468 			m_freem(rxs->rxs_mbuf);
   5469 			rxs->rxs_mbuf = NULL;
   5470 		}
   5471 	}
   5472 }
   5473 
   5474 /*
   5475  * Setup registers for RSS.
   5476  *
   5477  * XXX not yet VMDq support
   5478  */
   5479 static void
   5480 wm_init_rss(struct wm_softc *sc)
   5481 {
   5482 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5483 	int i;
   5484 
   5485 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5486 
   5487 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5488 		unsigned int qid, reta_ent;
   5489 
   5490 		qid  = i % sc->sc_nqueues;
   5491 		switch (sc->sc_type) {
   5492 		case WM_T_82574:
   5493 			reta_ent = __SHIFTIN(qid,
   5494 			    RETA_ENT_QINDEX_MASK_82574);
   5495 			break;
   5496 		case WM_T_82575:
   5497 			reta_ent = __SHIFTIN(qid,
   5498 			    RETA_ENT_QINDEX1_MASK_82575);
   5499 			break;
   5500 		default:
   5501 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5502 			break;
   5503 		}
   5504 
   5505 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5506 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5507 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5508 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5509 	}
   5510 
   5511 	rss_getkey((uint8_t *)rss_key);
   5512 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5513 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5514 
   5515 	if (sc->sc_type == WM_T_82574)
   5516 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5517 	else
   5518 		mrqc = MRQC_ENABLE_RSS_MQ;
   5519 
   5520 	/*
   5521 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5522 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5523 	 */
   5524 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5525 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5526 #if 0
   5527 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5528 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5529 #endif
   5530 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5531 
   5532 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5533 }
   5534 
   5535 /*
   5536  * Adjust TX and RX queue numbers which the system actulally uses.
   5537  *
   5538  * The numbers are affected by below parameters.
   5539  *     - The nubmer of hardware queues
   5540  *     - The number of MSI-X vectors (= "nvectors" argument)
   5541  *     - ncpu
   5542  */
   5543 static void
   5544 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5545 {
   5546 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5547 
   5548 	if (nvectors < 2) {
   5549 		sc->sc_nqueues = 1;
   5550 		return;
   5551 	}
   5552 
   5553 	switch (sc->sc_type) {
   5554 	case WM_T_82572:
   5555 		hw_ntxqueues = 2;
   5556 		hw_nrxqueues = 2;
   5557 		break;
   5558 	case WM_T_82574:
   5559 		hw_ntxqueues = 2;
   5560 		hw_nrxqueues = 2;
   5561 		break;
   5562 	case WM_T_82575:
   5563 		hw_ntxqueues = 4;
   5564 		hw_nrxqueues = 4;
   5565 		break;
   5566 	case WM_T_82576:
   5567 		hw_ntxqueues = 16;
   5568 		hw_nrxqueues = 16;
   5569 		break;
   5570 	case WM_T_82580:
   5571 	case WM_T_I350:
   5572 	case WM_T_I354:
   5573 		hw_ntxqueues = 8;
   5574 		hw_nrxqueues = 8;
   5575 		break;
   5576 	case WM_T_I210:
   5577 		hw_ntxqueues = 4;
   5578 		hw_nrxqueues = 4;
   5579 		break;
   5580 	case WM_T_I211:
   5581 		hw_ntxqueues = 2;
   5582 		hw_nrxqueues = 2;
   5583 		break;
   5584 		/*
   5585 		 * As below ethernet controllers does not support MSI-X,
   5586 		 * this driver let them not use multiqueue.
   5587 		 *     - WM_T_80003
   5588 		 *     - WM_T_ICH8
   5589 		 *     - WM_T_ICH9
   5590 		 *     - WM_T_ICH10
   5591 		 *     - WM_T_PCH
   5592 		 *     - WM_T_PCH2
   5593 		 *     - WM_T_PCH_LPT
   5594 		 */
   5595 	default:
   5596 		hw_ntxqueues = 1;
   5597 		hw_nrxqueues = 1;
   5598 		break;
   5599 	}
   5600 
   5601 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5602 
   5603 	/*
   5604 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   5605 	 * the number of queues used actually.
   5606 	 */
   5607 	if (nvectors < hw_nqueues + 1)
   5608 		sc->sc_nqueues = nvectors - 1;
   5609 	else
   5610 		sc->sc_nqueues = hw_nqueues;
   5611 
   5612 	/*
   5613 	 * As queues more then cpus cannot improve scaling, we limit
   5614 	 * the number of queues used actually.
   5615 	 */
   5616 	if (ncpu < sc->sc_nqueues)
   5617 		sc->sc_nqueues = ncpu;
   5618 }
   5619 
   5620 static inline bool
   5621 wm_is_using_msix(struct wm_softc *sc)
   5622 {
   5623 
   5624 	return (sc->sc_nintrs > 1);
   5625 }
   5626 
   5627 static inline bool
   5628 wm_is_using_multiqueue(struct wm_softc *sc)
   5629 {
   5630 
   5631 	return (sc->sc_nqueues > 1);
   5632 }
   5633 
   5634 static int
   5635 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5636 {
   5637 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5638 
   5639 	wmq->wmq_id = qidx;
   5640 	wmq->wmq_intr_idx = intr_idx;
   5641 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5642 	    wm_handle_queue, wmq);
   5643 	if (wmq->wmq_si != NULL)
   5644 		return 0;
   5645 
   5646 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5647 	    wmq->wmq_id);
   5648 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5649 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5650 	return ENOMEM;
   5651 }
   5652 
   5653 /*
   5654  * Both single interrupt MSI and INTx can use this function.
   5655  */
   5656 static int
   5657 wm_setup_legacy(struct wm_softc *sc)
   5658 {
   5659 	pci_chipset_tag_t pc = sc->sc_pc;
   5660 	const char *intrstr = NULL;
   5661 	char intrbuf[PCI_INTRSTR_LEN];
   5662 	int error;
   5663 
   5664 	error = wm_alloc_txrx_queues(sc);
   5665 	if (error) {
   5666 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5667 		    error);
   5668 		return ENOMEM;
   5669 	}
   5670 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5671 	    sizeof(intrbuf));
   5672 #ifdef WM_MPSAFE
   5673 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5674 #endif
   5675 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5676 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5677 	if (sc->sc_ihs[0] == NULL) {
   5678 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5679 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5680 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5681 		return ENOMEM;
   5682 	}
   5683 
   5684 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5685 	sc->sc_nintrs = 1;
   5686 
   5687 	return wm_softint_establish_queue(sc, 0, 0);
   5688 }
   5689 
   5690 static int
   5691 wm_setup_msix(struct wm_softc *sc)
   5692 {
   5693 	void *vih;
   5694 	kcpuset_t *affinity;
   5695 	int qidx, error, intr_idx, txrx_established;
   5696 	pci_chipset_tag_t pc = sc->sc_pc;
   5697 	const char *intrstr = NULL;
   5698 	char intrbuf[PCI_INTRSTR_LEN];
   5699 	char intr_xname[INTRDEVNAMEBUF];
   5700 
   5701 	if (sc->sc_nqueues < ncpu) {
   5702 		/*
   5703 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5704 		 * interrupts start from CPU#1.
   5705 		 */
   5706 		sc->sc_affinity_offset = 1;
   5707 	} else {
   5708 		/*
   5709 		 * In this case, this device use all CPUs. So, we unify
   5710 		 * affinitied cpu_index to msix vector number for readability.
   5711 		 */
   5712 		sc->sc_affinity_offset = 0;
   5713 	}
   5714 
   5715 	error = wm_alloc_txrx_queues(sc);
   5716 	if (error) {
   5717 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5718 		    error);
   5719 		return ENOMEM;
   5720 	}
   5721 
   5722 	kcpuset_create(&affinity, false);
   5723 	intr_idx = 0;
   5724 
   5725 	/*
   5726 	 * TX and RX
   5727 	 */
   5728 	txrx_established = 0;
   5729 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5730 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5731 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5732 
   5733 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5734 		    sizeof(intrbuf));
   5735 #ifdef WM_MPSAFE
   5736 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5737 		    PCI_INTR_MPSAFE, true);
   5738 #endif
   5739 		memset(intr_xname, 0, sizeof(intr_xname));
   5740 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5741 		    device_xname(sc->sc_dev), qidx);
   5742 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5743 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5744 		if (vih == NULL) {
   5745 			aprint_error_dev(sc->sc_dev,
   5746 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5747 			    intrstr ? " at " : "",
   5748 			    intrstr ? intrstr : "");
   5749 
   5750 			goto fail;
   5751 		}
   5752 		kcpuset_zero(affinity);
   5753 		/* Round-robin affinity */
   5754 		kcpuset_set(affinity, affinity_to);
   5755 		error = interrupt_distribute(vih, affinity, NULL);
   5756 		if (error == 0) {
   5757 			aprint_normal_dev(sc->sc_dev,
   5758 			    "for TX and RX interrupting at %s affinity to %u\n",
   5759 			    intrstr, affinity_to);
   5760 		} else {
   5761 			aprint_normal_dev(sc->sc_dev,
   5762 			    "for TX and RX interrupting at %s\n", intrstr);
   5763 		}
   5764 		sc->sc_ihs[intr_idx] = vih;
   5765 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5766 			goto fail;
   5767 		txrx_established++;
   5768 		intr_idx++;
   5769 	}
   5770 
   5771 	/* LINK */
   5772 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5773 	    sizeof(intrbuf));
   5774 #ifdef WM_MPSAFE
   5775 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5776 #endif
   5777 	memset(intr_xname, 0, sizeof(intr_xname));
   5778 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5779 	    device_xname(sc->sc_dev));
   5780 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5781 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5782 	if (vih == NULL) {
   5783 		aprint_error_dev(sc->sc_dev,
   5784 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5785 		    intrstr ? " at " : "",
   5786 		    intrstr ? intrstr : "");
   5787 
   5788 		goto fail;
   5789 	}
   5790 	/* Keep default affinity to LINK interrupt */
   5791 	aprint_normal_dev(sc->sc_dev,
   5792 	    "for LINK interrupting at %s\n", intrstr);
   5793 	sc->sc_ihs[intr_idx] = vih;
   5794 	sc->sc_link_intr_idx = intr_idx;
   5795 
   5796 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5797 	kcpuset_destroy(affinity);
   5798 	return 0;
   5799 
   5800  fail:
   5801 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5802 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5803 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5804 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5805 	}
   5806 
   5807 	kcpuset_destroy(affinity);
   5808 	return ENOMEM;
   5809 }
   5810 
   5811 static void
   5812 wm_unset_stopping_flags(struct wm_softc *sc)
   5813 {
   5814 	int i;
   5815 
   5816 	KASSERT(WM_CORE_LOCKED(sc));
   5817 
   5818 	/* Must unset stopping flags in ascending order. */
   5819 	for (i = 0; i < sc->sc_nqueues; i++) {
   5820 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5821 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5822 
   5823 		mutex_enter(txq->txq_lock);
   5824 		txq->txq_stopping = false;
   5825 		mutex_exit(txq->txq_lock);
   5826 
   5827 		mutex_enter(rxq->rxq_lock);
   5828 		rxq->rxq_stopping = false;
   5829 		mutex_exit(rxq->rxq_lock);
   5830 	}
   5831 
   5832 	sc->sc_core_stopping = false;
   5833 }
   5834 
   5835 static void
   5836 wm_set_stopping_flags(struct wm_softc *sc)
   5837 {
   5838 	int i;
   5839 
   5840 	KASSERT(WM_CORE_LOCKED(sc));
   5841 
   5842 	sc->sc_core_stopping = true;
   5843 
   5844 	/* Must set stopping flags in ascending order. */
   5845 	for (i = 0; i < sc->sc_nqueues; i++) {
   5846 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5847 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5848 
   5849 		mutex_enter(rxq->rxq_lock);
   5850 		rxq->rxq_stopping = true;
   5851 		mutex_exit(rxq->rxq_lock);
   5852 
   5853 		mutex_enter(txq->txq_lock);
   5854 		txq->txq_stopping = true;
   5855 		mutex_exit(txq->txq_lock);
   5856 	}
   5857 }
   5858 
   5859 /*
   5860  * Write interrupt interval value to ITR or EITR
   5861  */
   5862 static void
   5863 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5864 {
   5865 
   5866 	if (!wmq->wmq_set_itr)
   5867 		return;
   5868 
   5869 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5870 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5871 
   5872 		/*
   5873 		 * 82575 doesn't have CNT_INGR field.
   5874 		 * So, overwrite counter field by software.
   5875 		 */
   5876 		if (sc->sc_type == WM_T_82575)
   5877 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5878 		else
   5879 			eitr |= EITR_CNT_INGR;
   5880 
   5881 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5882 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5883 		/*
   5884 		 * 82574 has both ITR and EITR. SET EITR when we use
   5885 		 * the multi queue function with MSI-X.
   5886 		 */
   5887 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5888 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5889 	} else {
   5890 		KASSERT(wmq->wmq_id == 0);
   5891 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5892 	}
   5893 
   5894 	wmq->wmq_set_itr = false;
   5895 }
   5896 
   5897 /*
   5898  * TODO
   5899  * Below dynamic calculation of itr is almost the same as linux igb,
   5900  * however it does not fit to wm(4). So, we will have been disable AIM
   5901  * until we will find appropriate calculation of itr.
   5902  */
   5903 /*
   5904  * calculate interrupt interval value to be going to write register in
   5905  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   5906  */
   5907 static void
   5908 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5909 {
   5910 #ifdef NOTYET
   5911 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5912 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5913 	uint32_t avg_size = 0;
   5914 	uint32_t new_itr;
   5915 
   5916 	if (rxq->rxq_packets)
   5917 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5918 	if (txq->txq_packets)
   5919 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5920 
   5921 	if (avg_size == 0) {
   5922 		new_itr = 450; /* restore default value */
   5923 		goto out;
   5924 	}
   5925 
   5926 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5927 	avg_size += 24;
   5928 
   5929 	/* Don't starve jumbo frames */
   5930 	avg_size = uimin(avg_size, 3000);
   5931 
   5932 	/* Give a little boost to mid-size frames */
   5933 	if ((avg_size > 300) && (avg_size < 1200))
   5934 		new_itr = avg_size / 3;
   5935 	else
   5936 		new_itr = avg_size / 2;
   5937 
   5938 out:
   5939 	/*
   5940 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   5941 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5942 	 */
   5943 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5944 		new_itr *= 4;
   5945 
   5946 	if (new_itr != wmq->wmq_itr) {
   5947 		wmq->wmq_itr = new_itr;
   5948 		wmq->wmq_set_itr = true;
   5949 	} else
   5950 		wmq->wmq_set_itr = false;
   5951 
   5952 	rxq->rxq_packets = 0;
   5953 	rxq->rxq_bytes = 0;
   5954 	txq->txq_packets = 0;
   5955 	txq->txq_bytes = 0;
   5956 #endif
   5957 }
   5958 
   5959 static void
   5960 wm_init_sysctls(struct wm_softc *sc)
   5961 {
   5962 	struct sysctllog **log;
   5963 	const struct sysctlnode *rnode, *qnode, *cnode;
   5964 	int i, rv;
   5965 	const char *dvname;
   5966 
   5967 	log = &sc->sc_sysctllog;
   5968 	dvname = device_xname(sc->sc_dev);
   5969 
   5970 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5971 	    0, CTLTYPE_NODE, dvname,
   5972 	    SYSCTL_DESCR("wm information and settings"),
   5973 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5974 	if (rv != 0)
   5975 		goto err;
   5976 
   5977 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5978 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5979 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5980 	if (rv != 0)
   5981 		goto teardown;
   5982 
   5983 	for (i = 0; i < sc->sc_nqueues; i++) {
   5984 		struct wm_queue *wmq = &sc->sc_queue[i];
   5985 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5986 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5987 
   5988 		snprintf(sc->sc_queue[i].sysctlname,
   5989 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5990 
   5991 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5992 		    0, CTLTYPE_NODE,
   5993 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5994 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5995 			break;
   5996 
   5997 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5998 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5999 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6000 		    NULL, 0, &txq->txq_free,
   6001 		    0, CTL_CREATE, CTL_EOL) != 0)
   6002 			break;
   6003 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6004 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6005 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6006 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6007 		    0, CTL_CREATE, CTL_EOL) != 0)
   6008 			break;
   6009 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6010 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6011 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6012 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6013 		    0, CTL_CREATE, CTL_EOL) != 0)
   6014 			break;
   6015 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6016 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6017 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6018 		    NULL, 0, &txq->txq_next,
   6019 		    0, CTL_CREATE, CTL_EOL) != 0)
   6020 			break;
   6021 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6022 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6023 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6024 		    NULL, 0, &txq->txq_sfree,
   6025 		    0, CTL_CREATE, CTL_EOL) != 0)
   6026 			break;
   6027 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6028 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6029 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6030 		    NULL, 0, &txq->txq_snext,
   6031 		    0, CTL_CREATE, CTL_EOL) != 0)
   6032 			break;
   6033 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6034 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6035 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6036 		    NULL, 0, &txq->txq_sdirty,
   6037 		    0, CTL_CREATE, CTL_EOL) != 0)
   6038 			break;
   6039 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6040 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6041 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6042 		    NULL, 0, &txq->txq_flags,
   6043 		    0, CTL_CREATE, CTL_EOL) != 0)
   6044 			break;
   6045 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6046 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6047 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6048 		    NULL, 0, &txq->txq_stopping,
   6049 		    0, CTL_CREATE, CTL_EOL) != 0)
   6050 			break;
   6051 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6052 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6053 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6054 		    NULL, 0, &txq->txq_sending,
   6055 		    0, CTL_CREATE, CTL_EOL) != 0)
   6056 			break;
   6057 
   6058 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6059 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6060 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6061 		    NULL, 0, &rxq->rxq_ptr,
   6062 		    0, CTL_CREATE, CTL_EOL) != 0)
   6063 			break;
   6064 	}
   6065 
   6066 #ifdef WM_DEBUG
   6067 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6068 	    CTLTYPE_INT, "debug_flags",
   6069 	    SYSCTL_DESCR(
   6070 		    "Debug flags:\n"	\
   6071 		    "\t0x01 LINK\n"	\
   6072 		    "\t0x02 TX\n"	\
   6073 		    "\t0x04 RX\n"	\
   6074 		    "\t0x08 GMII\n"	\
   6075 		    "\t0x10 MANAGE\n"	\
   6076 		    "\t0x20 NVM\n"	\
   6077 		    "\t0x40 INIT\n"	\
   6078 		    "\t0x80 LOCK"),
   6079 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6080 	if (rv != 0)
   6081 		goto teardown;
   6082 #endif
   6083 
   6084 	return;
   6085 
   6086 teardown:
   6087 	sysctl_teardown(log);
   6088 err:
   6089 	sc->sc_sysctllog = NULL;
   6090 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6091 	    __func__, rv);
   6092 }
   6093 
   6094 /*
   6095  * wm_init:		[ifnet interface function]
   6096  *
   6097  *	Initialize the interface.
   6098  */
   6099 static int
   6100 wm_init(struct ifnet *ifp)
   6101 {
   6102 	struct wm_softc *sc = ifp->if_softc;
   6103 	int ret;
   6104 
   6105 	WM_CORE_LOCK(sc);
   6106 	ret = wm_init_locked(ifp);
   6107 	WM_CORE_UNLOCK(sc);
   6108 
   6109 	return ret;
   6110 }
   6111 
   6112 static int
   6113 wm_init_locked(struct ifnet *ifp)
   6114 {
   6115 	struct wm_softc *sc = ifp->if_softc;
   6116 	struct ethercom *ec = &sc->sc_ethercom;
   6117 	int i, j, trynum, error = 0;
   6118 	uint32_t reg, sfp_mask = 0;
   6119 
   6120 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6121 		device_xname(sc->sc_dev), __func__));
   6122 	KASSERT(WM_CORE_LOCKED(sc));
   6123 
   6124 	/*
   6125 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   6126 	 * There is a small but measurable benefit to avoiding the adjusment
   6127 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6128 	 * on such platforms.  One possibility is that the DMA itself is
   6129 	 * slightly more efficient if the front of the entire packet (instead
   6130 	 * of the front of the headers) is aligned.
   6131 	 *
   6132 	 * Note we must always set align_tweak to 0 if we are using
   6133 	 * jumbo frames.
   6134 	 */
   6135 #ifdef __NO_STRICT_ALIGNMENT
   6136 	sc->sc_align_tweak = 0;
   6137 #else
   6138 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6139 		sc->sc_align_tweak = 0;
   6140 	else
   6141 		sc->sc_align_tweak = 2;
   6142 #endif /* __NO_STRICT_ALIGNMENT */
   6143 
   6144 	/* Cancel any pending I/O. */
   6145 	wm_stop_locked(ifp, false, false);
   6146 
   6147 	/* Update statistics before reset */
   6148 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6149 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6150 
   6151 	/* >= PCH_SPT hardware workaround before reset. */
   6152 	if (sc->sc_type >= WM_T_PCH_SPT)
   6153 		wm_flush_desc_rings(sc);
   6154 
   6155 	/* Reset the chip to a known state. */
   6156 	wm_reset(sc);
   6157 
   6158 	/*
   6159 	 * AMT based hardware can now take control from firmware
   6160 	 * Do this after reset.
   6161 	 */
   6162 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6163 		wm_get_hw_control(sc);
   6164 
   6165 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6166 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6167 		wm_legacy_irq_quirk_spt(sc);
   6168 
   6169 	/* Init hardware bits */
   6170 	wm_initialize_hardware_bits(sc);
   6171 
   6172 	/* Reset the PHY. */
   6173 	if (sc->sc_flags & WM_F_HAS_MII)
   6174 		wm_gmii_reset(sc);
   6175 
   6176 	if (sc->sc_type >= WM_T_ICH8) {
   6177 		reg = CSR_READ(sc, WMREG_GCR);
   6178 		/*
   6179 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6180 		 * default after reset.
   6181 		 */
   6182 		if (sc->sc_type == WM_T_ICH8)
   6183 			reg |= GCR_NO_SNOOP_ALL;
   6184 		else
   6185 			reg &= ~GCR_NO_SNOOP_ALL;
   6186 		CSR_WRITE(sc, WMREG_GCR, reg);
   6187 	}
   6188 
   6189 	if ((sc->sc_type >= WM_T_ICH8)
   6190 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6191 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6192 
   6193 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6194 		reg |= CTRL_EXT_RO_DIS;
   6195 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6196 	}
   6197 
   6198 	/* Calculate (E)ITR value */
   6199 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6200 		/*
   6201 		 * For NEWQUEUE's EITR (except for 82575).
   6202 		 * 82575's EITR should be set same throttling value as other
   6203 		 * old controllers' ITR because the interrupt/sec calculation
   6204 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6205 		 *
   6206 		 * 82574's EITR should be set same throttling value as ITR.
   6207 		 *
   6208 		 * For N interrupts/sec, set this value to:
   6209 		 * 1,000,000 / N in contrast to ITR throttoling value.
   6210 		 */
   6211 		sc->sc_itr_init = 450;
   6212 	} else if (sc->sc_type >= WM_T_82543) {
   6213 		/*
   6214 		 * Set up the interrupt throttling register (units of 256ns)
   6215 		 * Note that a footnote in Intel's documentation says this
   6216 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6217 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6218 		 * that that is also true for the 1024ns units of the other
   6219 		 * interrupt-related timer registers -- so, really, we ought
   6220 		 * to divide this value by 4 when the link speed is low.
   6221 		 *
   6222 		 * XXX implement this division at link speed change!
   6223 		 */
   6224 
   6225 		/*
   6226 		 * For N interrupts/sec, set this value to:
   6227 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6228 		 * absolute and packet timer values to this value
   6229 		 * divided by 4 to get "simple timer" behavior.
   6230 		 */
   6231 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6232 	}
   6233 
   6234 	error = wm_init_txrx_queues(sc);
   6235 	if (error)
   6236 		goto out;
   6237 
   6238 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6239 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6240 	    (sc->sc_type >= WM_T_82575))
   6241 		wm_serdes_power_up_link_82575(sc);
   6242 
   6243 	/* Clear out the VLAN table -- we don't use it (yet). */
   6244 	CSR_WRITE(sc, WMREG_VET, 0);
   6245 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6246 		trynum = 10; /* Due to hw errata */
   6247 	else
   6248 		trynum = 1;
   6249 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6250 		for (j = 0; j < trynum; j++)
   6251 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6252 
   6253 	/*
   6254 	 * Set up flow-control parameters.
   6255 	 *
   6256 	 * XXX Values could probably stand some tuning.
   6257 	 */
   6258 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6259 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6260 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6261 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6262 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6263 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6264 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6265 	}
   6266 
   6267 	sc->sc_fcrtl = FCRTL_DFLT;
   6268 	if (sc->sc_type < WM_T_82543) {
   6269 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6270 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6271 	} else {
   6272 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6273 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6274 	}
   6275 
   6276 	if (sc->sc_type == WM_T_80003)
   6277 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6278 	else
   6279 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6280 
   6281 	/* Writes the control register. */
   6282 	wm_set_vlan(sc);
   6283 
   6284 	if (sc->sc_flags & WM_F_HAS_MII) {
   6285 		uint16_t kmreg;
   6286 
   6287 		switch (sc->sc_type) {
   6288 		case WM_T_80003:
   6289 		case WM_T_ICH8:
   6290 		case WM_T_ICH9:
   6291 		case WM_T_ICH10:
   6292 		case WM_T_PCH:
   6293 		case WM_T_PCH2:
   6294 		case WM_T_PCH_LPT:
   6295 		case WM_T_PCH_SPT:
   6296 		case WM_T_PCH_CNP:
   6297 			/*
   6298 			 * Set the mac to wait the maximum time between each
   6299 			 * iteration and increase the max iterations when
   6300 			 * polling the phy; this fixes erroneous timeouts at
   6301 			 * 10Mbps.
   6302 			 */
   6303 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6304 			    0xFFFF);
   6305 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6306 			    &kmreg);
   6307 			kmreg |= 0x3F;
   6308 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6309 			    kmreg);
   6310 			break;
   6311 		default:
   6312 			break;
   6313 		}
   6314 
   6315 		if (sc->sc_type == WM_T_80003) {
   6316 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6317 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6318 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6319 
   6320 			/* Bypass RX and TX FIFO's */
   6321 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6322 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6323 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6324 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6325 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6326 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6327 		}
   6328 	}
   6329 #if 0
   6330 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6331 #endif
   6332 
   6333 	/* Set up checksum offload parameters. */
   6334 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6335 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6336 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6337 		reg |= RXCSUM_IPOFL;
   6338 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6339 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6340 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6341 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6342 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6343 
   6344 	/* Set registers about MSI-X */
   6345 	if (wm_is_using_msix(sc)) {
   6346 		uint32_t ivar, qintr_idx;
   6347 		struct wm_queue *wmq;
   6348 		unsigned int qid;
   6349 
   6350 		if (sc->sc_type == WM_T_82575) {
   6351 			/* Interrupt control */
   6352 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6353 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6354 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6355 
   6356 			/* TX and RX */
   6357 			for (i = 0; i < sc->sc_nqueues; i++) {
   6358 				wmq = &sc->sc_queue[i];
   6359 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6360 				    EITR_TX_QUEUE(wmq->wmq_id)
   6361 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6362 			}
   6363 			/* Link status */
   6364 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6365 			    EITR_OTHER);
   6366 		} else if (sc->sc_type == WM_T_82574) {
   6367 			/* Interrupt control */
   6368 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6369 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6370 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6371 
   6372 			/*
   6373 			 * Workaround issue with spurious interrupts
   6374 			 * in MSI-X mode.
   6375 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   6376 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   6377 			 */
   6378 			reg = CSR_READ(sc, WMREG_RFCTL);
   6379 			reg |= WMREG_RFCTL_ACKDIS;
   6380 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6381 
   6382 			ivar = 0;
   6383 			/* TX and RX */
   6384 			for (i = 0; i < sc->sc_nqueues; i++) {
   6385 				wmq = &sc->sc_queue[i];
   6386 				qid = wmq->wmq_id;
   6387 				qintr_idx = wmq->wmq_intr_idx;
   6388 
   6389 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6390 				    IVAR_TX_MASK_Q_82574(qid));
   6391 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6392 				    IVAR_RX_MASK_Q_82574(qid));
   6393 			}
   6394 			/* Link status */
   6395 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6396 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6397 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6398 		} else {
   6399 			/* Interrupt control */
   6400 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6401 			    | GPIE_EIAME | GPIE_PBA);
   6402 
   6403 			switch (sc->sc_type) {
   6404 			case WM_T_82580:
   6405 			case WM_T_I350:
   6406 			case WM_T_I354:
   6407 			case WM_T_I210:
   6408 			case WM_T_I211:
   6409 				/* TX and RX */
   6410 				for (i = 0; i < sc->sc_nqueues; i++) {
   6411 					wmq = &sc->sc_queue[i];
   6412 					qid = wmq->wmq_id;
   6413 					qintr_idx = wmq->wmq_intr_idx;
   6414 
   6415 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6416 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6417 					ivar |= __SHIFTIN((qintr_idx
   6418 						| IVAR_VALID),
   6419 					    IVAR_TX_MASK_Q(qid));
   6420 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6421 					ivar |= __SHIFTIN((qintr_idx
   6422 						| IVAR_VALID),
   6423 					    IVAR_RX_MASK_Q(qid));
   6424 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6425 				}
   6426 				break;
   6427 			case WM_T_82576:
   6428 				/* TX and RX */
   6429 				for (i = 0; i < sc->sc_nqueues; i++) {
   6430 					wmq = &sc->sc_queue[i];
   6431 					qid = wmq->wmq_id;
   6432 					qintr_idx = wmq->wmq_intr_idx;
   6433 
   6434 					ivar = CSR_READ(sc,
   6435 					    WMREG_IVAR_Q_82576(qid));
   6436 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6437 					ivar |= __SHIFTIN((qintr_idx
   6438 						| IVAR_VALID),
   6439 					    IVAR_TX_MASK_Q_82576(qid));
   6440 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6441 					ivar |= __SHIFTIN((qintr_idx
   6442 						| IVAR_VALID),
   6443 					    IVAR_RX_MASK_Q_82576(qid));
   6444 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6445 					    ivar);
   6446 				}
   6447 				break;
   6448 			default:
   6449 				break;
   6450 			}
   6451 
   6452 			/* Link status */
   6453 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6454 			    IVAR_MISC_OTHER);
   6455 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6456 		}
   6457 
   6458 		if (wm_is_using_multiqueue(sc)) {
   6459 			wm_init_rss(sc);
   6460 
   6461 			/*
   6462 			** NOTE: Receive Full-Packet Checksum Offload
   6463 			** is mutually exclusive with Multiqueue. However
   6464 			** this is not the same as TCP/IP checksums which
   6465 			** still work.
   6466 			*/
   6467 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6468 			reg |= RXCSUM_PCSD;
   6469 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6470 		}
   6471 	}
   6472 
   6473 	/* Set up the interrupt registers. */
   6474 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6475 
   6476 	/* Enable SFP module insertion interrupt if it's required */
   6477 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6478 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6479 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6480 		sfp_mask = ICR_GPI(0);
   6481 	}
   6482 
   6483 	if (wm_is_using_msix(sc)) {
   6484 		uint32_t mask;
   6485 		struct wm_queue *wmq;
   6486 
   6487 		switch (sc->sc_type) {
   6488 		case WM_T_82574:
   6489 			mask = 0;
   6490 			for (i = 0; i < sc->sc_nqueues; i++) {
   6491 				wmq = &sc->sc_queue[i];
   6492 				mask |= ICR_TXQ(wmq->wmq_id);
   6493 				mask |= ICR_RXQ(wmq->wmq_id);
   6494 			}
   6495 			mask |= ICR_OTHER;
   6496 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6497 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6498 			break;
   6499 		default:
   6500 			if (sc->sc_type == WM_T_82575) {
   6501 				mask = 0;
   6502 				for (i = 0; i < sc->sc_nqueues; i++) {
   6503 					wmq = &sc->sc_queue[i];
   6504 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6505 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6506 				}
   6507 				mask |= EITR_OTHER;
   6508 			} else {
   6509 				mask = 0;
   6510 				for (i = 0; i < sc->sc_nqueues; i++) {
   6511 					wmq = &sc->sc_queue[i];
   6512 					mask |= 1 << wmq->wmq_intr_idx;
   6513 				}
   6514 				mask |= 1 << sc->sc_link_intr_idx;
   6515 			}
   6516 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6517 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6518 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6519 
   6520 			/* For other interrupts */
   6521 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6522 			break;
   6523 		}
   6524 	} else {
   6525 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6526 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6527 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6528 	}
   6529 
   6530 	/* Set up the inter-packet gap. */
   6531 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6532 
   6533 	if (sc->sc_type >= WM_T_82543) {
   6534 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6535 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6536 			wm_itrs_writereg(sc, wmq);
   6537 		}
   6538 		/*
   6539 		 * Link interrupts occur much less than TX
   6540 		 * interrupts and RX interrupts. So, we don't
   6541 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6542 		 * FreeBSD's if_igb.
   6543 		 */
   6544 	}
   6545 
   6546 	/* Set the VLAN ethernetype. */
   6547 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6548 
   6549 	/*
   6550 	 * Set up the transmit control register; we start out with
   6551 	 * a collision distance suitable for FDX, but update it whe
   6552 	 * we resolve the media type.
   6553 	 */
   6554 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6555 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6556 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6557 	if (sc->sc_type >= WM_T_82571)
   6558 		sc->sc_tctl |= TCTL_MULR;
   6559 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6560 
   6561 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6562 		/* Write TDT after TCTL.EN is set. See the document. */
   6563 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6564 	}
   6565 
   6566 	if (sc->sc_type == WM_T_80003) {
   6567 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6568 		reg &= ~TCTL_EXT_GCEX_MASK;
   6569 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6570 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6571 	}
   6572 
   6573 	/* Set the media. */
   6574 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6575 		goto out;
   6576 
   6577 	/* Configure for OS presence */
   6578 	wm_init_manageability(sc);
   6579 
   6580 	/*
   6581 	 * Set up the receive control register; we actually program the
   6582 	 * register when we set the receive filter. Use multicast address
   6583 	 * offset type 0.
   6584 	 *
   6585 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6586 	 * don't enable that feature.
   6587 	 */
   6588 	sc->sc_mchash_type = 0;
   6589 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6590 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6591 
   6592 	/* 82574 use one buffer extended Rx descriptor. */
   6593 	if (sc->sc_type == WM_T_82574)
   6594 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6595 
   6596 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6597 		sc->sc_rctl |= RCTL_SECRC;
   6598 
   6599 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6600 	    && (ifp->if_mtu > ETHERMTU)) {
   6601 		sc->sc_rctl |= RCTL_LPE;
   6602 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6603 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6604 	}
   6605 
   6606 	if (MCLBYTES == 2048)
   6607 		sc->sc_rctl |= RCTL_2k;
   6608 	else {
   6609 		if (sc->sc_type >= WM_T_82543) {
   6610 			switch (MCLBYTES) {
   6611 			case 4096:
   6612 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6613 				break;
   6614 			case 8192:
   6615 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6616 				break;
   6617 			case 16384:
   6618 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6619 				break;
   6620 			default:
   6621 				panic("wm_init: MCLBYTES %d unsupported",
   6622 				    MCLBYTES);
   6623 				break;
   6624 			}
   6625 		} else
   6626 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6627 	}
   6628 
   6629 	/* Enable ECC */
   6630 	switch (sc->sc_type) {
   6631 	case WM_T_82571:
   6632 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6633 		reg |= PBA_ECC_CORR_EN;
   6634 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6635 		break;
   6636 	case WM_T_PCH_LPT:
   6637 	case WM_T_PCH_SPT:
   6638 	case WM_T_PCH_CNP:
   6639 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6640 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6641 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6642 
   6643 		sc->sc_ctrl |= CTRL_MEHE;
   6644 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6645 		break;
   6646 	default:
   6647 		break;
   6648 	}
   6649 
   6650 	/*
   6651 	 * Set the receive filter.
   6652 	 *
   6653 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6654 	 * the setting of RCTL.EN in wm_set_filter()
   6655 	 */
   6656 	wm_set_filter(sc);
   6657 
   6658 	/* On 575 and later set RDT only if RX enabled */
   6659 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6660 		int qidx;
   6661 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6662 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6663 			for (i = 0; i < WM_NRXDESC; i++) {
   6664 				mutex_enter(rxq->rxq_lock);
   6665 				wm_init_rxdesc(rxq, i);
   6666 				mutex_exit(rxq->rxq_lock);
   6667 
   6668 			}
   6669 		}
   6670 	}
   6671 
   6672 	wm_unset_stopping_flags(sc);
   6673 
   6674 	/* Start the one second link check clock. */
   6675 	callout_schedule(&sc->sc_tick_ch, hz);
   6676 
   6677 	/* ...all done! */
   6678 	ifp->if_flags |= IFF_RUNNING;
   6679 
   6680  out:
   6681 	/* Save last flags for the callback */
   6682 	sc->sc_if_flags = ifp->if_flags;
   6683 	sc->sc_ec_capenable = ec->ec_capenable;
   6684 	if (error)
   6685 		log(LOG_ERR, "%s: interface not running\n",
   6686 		    device_xname(sc->sc_dev));
   6687 	return error;
   6688 }
   6689 
   6690 /*
   6691  * wm_stop:		[ifnet interface function]
   6692  *
   6693  *	Stop transmission on the interface.
   6694  */
   6695 static void
   6696 wm_stop(struct ifnet *ifp, int disable)
   6697 {
   6698 	struct wm_softc *sc = ifp->if_softc;
   6699 
   6700 	ASSERT_SLEEPABLE();
   6701 
   6702 	WM_CORE_LOCK(sc);
   6703 	wm_stop_locked(ifp, disable ? true : false, true);
   6704 	WM_CORE_UNLOCK(sc);
   6705 
   6706 	/*
   6707 	 * After wm_set_stopping_flags(), it is guaranteed
   6708 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6709 	 * However, workqueue_wait() cannot call in wm_stop_locked()
   6710 	 * because it can sleep...
   6711 	 * so, call workqueue_wait() here.
   6712 	 */
   6713 	for (int i = 0; i < sc->sc_nqueues; i++)
   6714 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6715 }
   6716 
   6717 static void
   6718 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6719 {
   6720 	struct wm_softc *sc = ifp->if_softc;
   6721 	struct wm_txsoft *txs;
   6722 	int i, qidx;
   6723 
   6724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6725 		device_xname(sc->sc_dev), __func__));
   6726 	KASSERT(WM_CORE_LOCKED(sc));
   6727 
   6728 	wm_set_stopping_flags(sc);
   6729 
   6730 	if (sc->sc_flags & WM_F_HAS_MII) {
   6731 		/* Down the MII. */
   6732 		mii_down(&sc->sc_mii);
   6733 	} else {
   6734 #if 0
   6735 		/* Should we clear PHY's status properly? */
   6736 		wm_reset(sc);
   6737 #endif
   6738 	}
   6739 
   6740 	/* Stop the transmit and receive processes. */
   6741 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6742 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6743 	sc->sc_rctl &= ~RCTL_EN;
   6744 
   6745 	/*
   6746 	 * Clear the interrupt mask to ensure the device cannot assert its
   6747 	 * interrupt line.
   6748 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6749 	 * service any currently pending or shared interrupt.
   6750 	 */
   6751 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6752 	sc->sc_icr = 0;
   6753 	if (wm_is_using_msix(sc)) {
   6754 		if (sc->sc_type != WM_T_82574) {
   6755 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6756 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6757 		} else
   6758 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6759 	}
   6760 
   6761 	/*
   6762 	 * Stop callouts after interrupts are disabled; if we have
   6763 	 * to wait for them, we will be releasing the CORE_LOCK
   6764 	 * briefly, which will unblock interrupts on the current CPU.
   6765 	 */
   6766 
   6767 	/* Stop the one second clock. */
   6768 	if (wait)
   6769 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6770 	else
   6771 		callout_stop(&sc->sc_tick_ch);
   6772 
   6773 	/* Stop the 82547 Tx FIFO stall check timer. */
   6774 	if (sc->sc_type == WM_T_82547) {
   6775 		if (wait)
   6776 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6777 		else
   6778 			callout_stop(&sc->sc_txfifo_ch);
   6779 	}
   6780 
   6781 	/* Release any queued transmit buffers. */
   6782 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6783 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6784 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6785 		struct mbuf *m;
   6786 
   6787 		mutex_enter(txq->txq_lock);
   6788 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6789 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6790 			txs = &txq->txq_soft[i];
   6791 			if (txs->txs_mbuf != NULL) {
   6792 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6793 				m_freem(txs->txs_mbuf);
   6794 				txs->txs_mbuf = NULL;
   6795 			}
   6796 		}
   6797 		/* Drain txq_interq */
   6798 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6799 			m_freem(m);
   6800 		mutex_exit(txq->txq_lock);
   6801 	}
   6802 
   6803 	/* Mark the interface as down and cancel the watchdog timer. */
   6804 	ifp->if_flags &= ~IFF_RUNNING;
   6805 
   6806 	if (disable) {
   6807 		for (i = 0; i < sc->sc_nqueues; i++) {
   6808 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6809 			mutex_enter(rxq->rxq_lock);
   6810 			wm_rxdrain(rxq);
   6811 			mutex_exit(rxq->rxq_lock);
   6812 		}
   6813 	}
   6814 
   6815 #if 0 /* notyet */
   6816 	if (sc->sc_type >= WM_T_82544)
   6817 		CSR_WRITE(sc, WMREG_WUC, 0);
   6818 #endif
   6819 }
   6820 
   6821 static void
   6822 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6823 {
   6824 	struct mbuf *m;
   6825 	int i;
   6826 
   6827 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6828 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6829 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6830 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6831 		    m->m_data, m->m_len, m->m_flags);
   6832 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6833 	    i, i == 1 ? "" : "s");
   6834 }
   6835 
   6836 /*
   6837  * wm_82547_txfifo_stall:
   6838  *
   6839  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6840  *	reset the FIFO pointers, and restart packet transmission.
   6841  */
   6842 static void
   6843 wm_82547_txfifo_stall(void *arg)
   6844 {
   6845 	struct wm_softc *sc = arg;
   6846 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6847 
   6848 	mutex_enter(txq->txq_lock);
   6849 
   6850 	if (txq->txq_stopping)
   6851 		goto out;
   6852 
   6853 	if (txq->txq_fifo_stall) {
   6854 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6855 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6856 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6857 			/*
   6858 			 * Packets have drained.  Stop transmitter, reset
   6859 			 * FIFO pointers, restart transmitter, and kick
   6860 			 * the packet queue.
   6861 			 */
   6862 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6863 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6864 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6865 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6866 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6867 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6868 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6869 			CSR_WRITE_FLUSH(sc);
   6870 
   6871 			txq->txq_fifo_head = 0;
   6872 			txq->txq_fifo_stall = 0;
   6873 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6874 		} else {
   6875 			/*
   6876 			 * Still waiting for packets to drain; try again in
   6877 			 * another tick.
   6878 			 */
   6879 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6880 		}
   6881 	}
   6882 
   6883 out:
   6884 	mutex_exit(txq->txq_lock);
   6885 }
   6886 
   6887 /*
   6888  * wm_82547_txfifo_bugchk:
   6889  *
   6890  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6891  *	prevent enqueueing a packet that would wrap around the end
   6892  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6893  *
   6894  *	We do this by checking the amount of space before the end
   6895  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6896  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6897  *	the internal FIFO pointers to the beginning, and restart
   6898  *	transmission on the interface.
   6899  */
   6900 #define	WM_FIFO_HDR		0x10
   6901 #define	WM_82547_PAD_LEN	0x3e0
   6902 static int
   6903 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6904 {
   6905 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6906 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6907 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6908 
   6909 	/* Just return if already stalled. */
   6910 	if (txq->txq_fifo_stall)
   6911 		return 1;
   6912 
   6913 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6914 		/* Stall only occurs in half-duplex mode. */
   6915 		goto send_packet;
   6916 	}
   6917 
   6918 	if (len >= WM_82547_PAD_LEN + space) {
   6919 		txq->txq_fifo_stall = 1;
   6920 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6921 		return 1;
   6922 	}
   6923 
   6924  send_packet:
   6925 	txq->txq_fifo_head += len;
   6926 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6927 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6928 
   6929 	return 0;
   6930 }
   6931 
   6932 static int
   6933 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6934 {
   6935 	int error;
   6936 
   6937 	/*
   6938 	 * Allocate the control data structures, and create and load the
   6939 	 * DMA map for it.
   6940 	 *
   6941 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6942 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6943 	 * both sets within the same 4G segment.
   6944 	 */
   6945 	if (sc->sc_type < WM_T_82544)
   6946 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6947 	else
   6948 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6949 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6950 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6951 	else
   6952 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6953 
   6954 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6955 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6956 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6957 		aprint_error_dev(sc->sc_dev,
   6958 		    "unable to allocate TX control data, error = %d\n",
   6959 		    error);
   6960 		goto fail_0;
   6961 	}
   6962 
   6963 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6964 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6965 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6966 		aprint_error_dev(sc->sc_dev,
   6967 		    "unable to map TX control data, error = %d\n", error);
   6968 		goto fail_1;
   6969 	}
   6970 
   6971 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6972 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6973 		aprint_error_dev(sc->sc_dev,
   6974 		    "unable to create TX control data DMA map, error = %d\n",
   6975 		    error);
   6976 		goto fail_2;
   6977 	}
   6978 
   6979 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6980 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6981 		aprint_error_dev(sc->sc_dev,
   6982 		    "unable to load TX control data DMA map, error = %d\n",
   6983 		    error);
   6984 		goto fail_3;
   6985 	}
   6986 
   6987 	return 0;
   6988 
   6989  fail_3:
   6990 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6991  fail_2:
   6992 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6993 	    WM_TXDESCS_SIZE(txq));
   6994  fail_1:
   6995 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6996  fail_0:
   6997 	return error;
   6998 }
   6999 
   7000 static void
   7001 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7002 {
   7003 
   7004 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7005 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7006 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7007 	    WM_TXDESCS_SIZE(txq));
   7008 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7009 }
   7010 
   7011 static int
   7012 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7013 {
   7014 	int error;
   7015 	size_t rxq_descs_size;
   7016 
   7017 	/*
   7018 	 * Allocate the control data structures, and create and load the
   7019 	 * DMA map for it.
   7020 	 *
   7021 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7022 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7023 	 * both sets within the same 4G segment.
   7024 	 */
   7025 	rxq->rxq_ndesc = WM_NRXDESC;
   7026 	if (sc->sc_type == WM_T_82574)
   7027 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7028 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7029 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7030 	else
   7031 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7032 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7033 
   7034 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7035 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7036 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7037 		aprint_error_dev(sc->sc_dev,
   7038 		    "unable to allocate RX control data, error = %d\n",
   7039 		    error);
   7040 		goto fail_0;
   7041 	}
   7042 
   7043 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7044 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7045 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7046 		aprint_error_dev(sc->sc_dev,
   7047 		    "unable to map RX control data, error = %d\n", error);
   7048 		goto fail_1;
   7049 	}
   7050 
   7051 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7052 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7053 		aprint_error_dev(sc->sc_dev,
   7054 		    "unable to create RX control data DMA map, error = %d\n",
   7055 		    error);
   7056 		goto fail_2;
   7057 	}
   7058 
   7059 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7060 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7061 		aprint_error_dev(sc->sc_dev,
   7062 		    "unable to load RX control data DMA map, error = %d\n",
   7063 		    error);
   7064 		goto fail_3;
   7065 	}
   7066 
   7067 	return 0;
   7068 
   7069  fail_3:
   7070 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7071  fail_2:
   7072 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7073 	    rxq_descs_size);
   7074  fail_1:
   7075 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7076  fail_0:
   7077 	return error;
   7078 }
   7079 
   7080 static void
   7081 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7082 {
   7083 
   7084 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7085 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7086 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7087 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7088 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7089 }
   7090 
   7091 
   7092 static int
   7093 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7094 {
   7095 	int i, error;
   7096 
   7097 	/* Create the transmit buffer DMA maps. */
   7098 	WM_TXQUEUELEN(txq) =
   7099 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7100 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7101 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7102 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7103 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7104 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7105 			aprint_error_dev(sc->sc_dev,
   7106 			    "unable to create Tx DMA map %d, error = %d\n",
   7107 			    i, error);
   7108 			goto fail;
   7109 		}
   7110 	}
   7111 
   7112 	return 0;
   7113 
   7114  fail:
   7115 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7116 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7117 			bus_dmamap_destroy(sc->sc_dmat,
   7118 			    txq->txq_soft[i].txs_dmamap);
   7119 	}
   7120 	return error;
   7121 }
   7122 
   7123 static void
   7124 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7125 {
   7126 	int i;
   7127 
   7128 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7129 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7130 			bus_dmamap_destroy(sc->sc_dmat,
   7131 			    txq->txq_soft[i].txs_dmamap);
   7132 	}
   7133 }
   7134 
   7135 static int
   7136 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7137 {
   7138 	int i, error;
   7139 
   7140 	/* Create the receive buffer DMA maps. */
   7141 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7142 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7143 			    MCLBYTES, 0, 0,
   7144 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7145 			aprint_error_dev(sc->sc_dev,
   7146 			    "unable to create Rx DMA map %d error = %d\n",
   7147 			    i, error);
   7148 			goto fail;
   7149 		}
   7150 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7151 	}
   7152 
   7153 	return 0;
   7154 
   7155  fail:
   7156 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7157 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7158 			bus_dmamap_destroy(sc->sc_dmat,
   7159 			    rxq->rxq_soft[i].rxs_dmamap);
   7160 	}
   7161 	return error;
   7162 }
   7163 
   7164 static void
   7165 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7166 {
   7167 	int i;
   7168 
   7169 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7170 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7171 			bus_dmamap_destroy(sc->sc_dmat,
   7172 			    rxq->rxq_soft[i].rxs_dmamap);
   7173 	}
   7174 }
   7175 
   7176 /*
   7177  * wm_alloc_quques:
   7178  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7179  */
   7180 static int
   7181 wm_alloc_txrx_queues(struct wm_softc *sc)
   7182 {
   7183 	int i, error, tx_done, rx_done;
   7184 
   7185 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7186 	    KM_SLEEP);
   7187 	if (sc->sc_queue == NULL) {
   7188 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   7189 		error = ENOMEM;
   7190 		goto fail_0;
   7191 	}
   7192 
   7193 	/* For transmission */
   7194 	error = 0;
   7195 	tx_done = 0;
   7196 	for (i = 0; i < sc->sc_nqueues; i++) {
   7197 #ifdef WM_EVENT_COUNTERS
   7198 		int j;
   7199 		const char *xname;
   7200 #endif
   7201 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7202 		txq->txq_sc = sc;
   7203 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7204 
   7205 		error = wm_alloc_tx_descs(sc, txq);
   7206 		if (error)
   7207 			break;
   7208 		error = wm_alloc_tx_buffer(sc, txq);
   7209 		if (error) {
   7210 			wm_free_tx_descs(sc, txq);
   7211 			break;
   7212 		}
   7213 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7214 		if (txq->txq_interq == NULL) {
   7215 			wm_free_tx_descs(sc, txq);
   7216 			wm_free_tx_buffer(sc, txq);
   7217 			error = ENOMEM;
   7218 			break;
   7219 		}
   7220 
   7221 #ifdef WM_EVENT_COUNTERS
   7222 		xname = device_xname(sc->sc_dev);
   7223 
   7224 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7225 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7226 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7227 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7228 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7229 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7230 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7231 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7232 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7233 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7234 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7235 
   7236 		for (j = 0; j < WM_NTXSEGS; j++) {
   7237 			snprintf(txq->txq_txseg_evcnt_names[j],
   7238 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7239 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7240 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7241 		}
   7242 
   7243 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7244 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7245 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7246 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7247 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7248 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7249 #endif /* WM_EVENT_COUNTERS */
   7250 
   7251 		tx_done++;
   7252 	}
   7253 	if (error)
   7254 		goto fail_1;
   7255 
   7256 	/* For receive */
   7257 	error = 0;
   7258 	rx_done = 0;
   7259 	for (i = 0; i < sc->sc_nqueues; i++) {
   7260 #ifdef WM_EVENT_COUNTERS
   7261 		const char *xname;
   7262 #endif
   7263 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7264 		rxq->rxq_sc = sc;
   7265 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7266 
   7267 		error = wm_alloc_rx_descs(sc, rxq);
   7268 		if (error)
   7269 			break;
   7270 
   7271 		error = wm_alloc_rx_buffer(sc, rxq);
   7272 		if (error) {
   7273 			wm_free_rx_descs(sc, rxq);
   7274 			break;
   7275 		}
   7276 
   7277 #ifdef WM_EVENT_COUNTERS
   7278 		xname = device_xname(sc->sc_dev);
   7279 
   7280 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7281 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7282 
   7283 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7284 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7285 #endif /* WM_EVENT_COUNTERS */
   7286 
   7287 		rx_done++;
   7288 	}
   7289 	if (error)
   7290 		goto fail_2;
   7291 
   7292 	return 0;
   7293 
   7294  fail_2:
   7295 	for (i = 0; i < rx_done; i++) {
   7296 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7297 		wm_free_rx_buffer(sc, rxq);
   7298 		wm_free_rx_descs(sc, rxq);
   7299 		if (rxq->rxq_lock)
   7300 			mutex_obj_free(rxq->rxq_lock);
   7301 	}
   7302  fail_1:
   7303 	for (i = 0; i < tx_done; i++) {
   7304 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7305 		pcq_destroy(txq->txq_interq);
   7306 		wm_free_tx_buffer(sc, txq);
   7307 		wm_free_tx_descs(sc, txq);
   7308 		if (txq->txq_lock)
   7309 			mutex_obj_free(txq->txq_lock);
   7310 	}
   7311 
   7312 	kmem_free(sc->sc_queue,
   7313 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7314  fail_0:
   7315 	return error;
   7316 }
   7317 
   7318 /*
   7319  * wm_free_quques:
   7320  *	Free {tx,rx}descs and {tx,rx} buffers
   7321  */
   7322 static void
   7323 wm_free_txrx_queues(struct wm_softc *sc)
   7324 {
   7325 	int i;
   7326 
   7327 	for (i = 0; i < sc->sc_nqueues; i++) {
   7328 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7329 
   7330 #ifdef WM_EVENT_COUNTERS
   7331 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7332 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7333 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7334 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7335 #endif /* WM_EVENT_COUNTERS */
   7336 
   7337 		wm_free_rx_buffer(sc, rxq);
   7338 		wm_free_rx_descs(sc, rxq);
   7339 		if (rxq->rxq_lock)
   7340 			mutex_obj_free(rxq->rxq_lock);
   7341 	}
   7342 
   7343 	for (i = 0; i < sc->sc_nqueues; i++) {
   7344 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7345 		struct mbuf *m;
   7346 #ifdef WM_EVENT_COUNTERS
   7347 		int j;
   7348 
   7349 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7350 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7351 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7352 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7353 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7354 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7355 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7356 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7357 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7358 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7359 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7360 
   7361 		for (j = 0; j < WM_NTXSEGS; j++)
   7362 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7363 
   7364 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7365 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7366 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7367 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7368 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7369 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7370 #endif /* WM_EVENT_COUNTERS */
   7371 
   7372 		/* Drain txq_interq */
   7373 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7374 			m_freem(m);
   7375 		pcq_destroy(txq->txq_interq);
   7376 
   7377 		wm_free_tx_buffer(sc, txq);
   7378 		wm_free_tx_descs(sc, txq);
   7379 		if (txq->txq_lock)
   7380 			mutex_obj_free(txq->txq_lock);
   7381 	}
   7382 
   7383 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7384 }
   7385 
   7386 static void
   7387 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7388 {
   7389 
   7390 	KASSERT(mutex_owned(txq->txq_lock));
   7391 
   7392 	/* Initialize the transmit descriptor ring. */
   7393 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7394 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7395 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7396 	txq->txq_free = WM_NTXDESC(txq);
   7397 	txq->txq_next = 0;
   7398 }
   7399 
   7400 static void
   7401 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7402     struct wm_txqueue *txq)
   7403 {
   7404 
   7405 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7406 		device_xname(sc->sc_dev), __func__));
   7407 	KASSERT(mutex_owned(txq->txq_lock));
   7408 
   7409 	if (sc->sc_type < WM_T_82543) {
   7410 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7411 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7412 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7413 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7414 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7415 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7416 	} else {
   7417 		int qid = wmq->wmq_id;
   7418 
   7419 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7420 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7421 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7422 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7423 
   7424 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7425 			/*
   7426 			 * Don't write TDT before TCTL.EN is set.
   7427 			 * See the document.
   7428 			 */
   7429 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7430 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7431 			    | TXDCTL_WTHRESH(0));
   7432 		else {
   7433 			/* XXX should update with AIM? */
   7434 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7435 			if (sc->sc_type >= WM_T_82540) {
   7436 				/* Should be the same */
   7437 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7438 			}
   7439 
   7440 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7441 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7442 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7443 		}
   7444 	}
   7445 }
   7446 
   7447 static void
   7448 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7449 {
   7450 	int i;
   7451 
   7452 	KASSERT(mutex_owned(txq->txq_lock));
   7453 
   7454 	/* Initialize the transmit job descriptors. */
   7455 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7456 		txq->txq_soft[i].txs_mbuf = NULL;
   7457 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7458 	txq->txq_snext = 0;
   7459 	txq->txq_sdirty = 0;
   7460 }
   7461 
   7462 static void
   7463 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7464     struct wm_txqueue *txq)
   7465 {
   7466 
   7467 	KASSERT(mutex_owned(txq->txq_lock));
   7468 
   7469 	/*
   7470 	 * Set up some register offsets that are different between
   7471 	 * the i82542 and the i82543 and later chips.
   7472 	 */
   7473 	if (sc->sc_type < WM_T_82543)
   7474 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7475 	else
   7476 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7477 
   7478 	wm_init_tx_descs(sc, txq);
   7479 	wm_init_tx_regs(sc, wmq, txq);
   7480 	wm_init_tx_buffer(sc, txq);
   7481 
   7482 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7483 	txq->txq_sending = false;
   7484 }
   7485 
   7486 static void
   7487 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7488     struct wm_rxqueue *rxq)
   7489 {
   7490 
   7491 	KASSERT(mutex_owned(rxq->rxq_lock));
   7492 
   7493 	/*
   7494 	 * Initialize the receive descriptor and receive job
   7495 	 * descriptor rings.
   7496 	 */
   7497 	if (sc->sc_type < WM_T_82543) {
   7498 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7499 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7500 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7501 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7502 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7503 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7504 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7505 
   7506 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7507 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7508 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7509 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7510 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7511 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7512 	} else {
   7513 		int qid = wmq->wmq_id;
   7514 
   7515 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7516 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7517 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7518 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7519 
   7520 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7521 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7522 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7523 
   7524 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7525 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7526 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7527 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7528 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7529 			    | RXDCTL_WTHRESH(1));
   7530 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7531 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7532 		} else {
   7533 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7534 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7535 			/* XXX should update with AIM? */
   7536 			CSR_WRITE(sc, WMREG_RDTR,
   7537 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7538 			/* MUST be same */
   7539 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7540 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7541 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7542 		}
   7543 	}
   7544 }
   7545 
   7546 static int
   7547 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7548 {
   7549 	struct wm_rxsoft *rxs;
   7550 	int error, i;
   7551 
   7552 	KASSERT(mutex_owned(rxq->rxq_lock));
   7553 
   7554 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7555 		rxs = &rxq->rxq_soft[i];
   7556 		if (rxs->rxs_mbuf == NULL) {
   7557 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7558 				log(LOG_ERR, "%s: unable to allocate or map "
   7559 				    "rx buffer %d, error = %d\n",
   7560 				    device_xname(sc->sc_dev), i, error);
   7561 				/*
   7562 				 * XXX Should attempt to run with fewer receive
   7563 				 * XXX buffers instead of just failing.
   7564 				 */
   7565 				wm_rxdrain(rxq);
   7566 				return ENOMEM;
   7567 			}
   7568 		} else {
   7569 			/*
   7570 			 * For 82575 and 82576, the RX descriptors must be
   7571 			 * initialized after the setting of RCTL.EN in
   7572 			 * wm_set_filter()
   7573 			 */
   7574 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7575 				wm_init_rxdesc(rxq, i);
   7576 		}
   7577 	}
   7578 	rxq->rxq_ptr = 0;
   7579 	rxq->rxq_discard = 0;
   7580 	WM_RXCHAIN_RESET(rxq);
   7581 
   7582 	return 0;
   7583 }
   7584 
   7585 static int
   7586 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7587     struct wm_rxqueue *rxq)
   7588 {
   7589 
   7590 	KASSERT(mutex_owned(rxq->rxq_lock));
   7591 
   7592 	/*
   7593 	 * Set up some register offsets that are different between
   7594 	 * the i82542 and the i82543 and later chips.
   7595 	 */
   7596 	if (sc->sc_type < WM_T_82543)
   7597 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7598 	else
   7599 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7600 
   7601 	wm_init_rx_regs(sc, wmq, rxq);
   7602 	return wm_init_rx_buffer(sc, rxq);
   7603 }
   7604 
   7605 /*
   7606  * wm_init_quques:
   7607  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7608  */
   7609 static int
   7610 wm_init_txrx_queues(struct wm_softc *sc)
   7611 {
   7612 	int i, error = 0;
   7613 
   7614 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7615 		device_xname(sc->sc_dev), __func__));
   7616 
   7617 	for (i = 0; i < sc->sc_nqueues; i++) {
   7618 		struct wm_queue *wmq = &sc->sc_queue[i];
   7619 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7620 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7621 
   7622 		/*
   7623 		 * TODO
   7624 		 * Currently, use constant variable instead of AIM.
   7625 		 * Furthermore, the interrupt interval of multiqueue which use
   7626 		 * polling mode is less than default value.
   7627 		 * More tuning and AIM are required.
   7628 		 */
   7629 		if (wm_is_using_multiqueue(sc))
   7630 			wmq->wmq_itr = 50;
   7631 		else
   7632 			wmq->wmq_itr = sc->sc_itr_init;
   7633 		wmq->wmq_set_itr = true;
   7634 
   7635 		mutex_enter(txq->txq_lock);
   7636 		wm_init_tx_queue(sc, wmq, txq);
   7637 		mutex_exit(txq->txq_lock);
   7638 
   7639 		mutex_enter(rxq->rxq_lock);
   7640 		error = wm_init_rx_queue(sc, wmq, rxq);
   7641 		mutex_exit(rxq->rxq_lock);
   7642 		if (error)
   7643 			break;
   7644 	}
   7645 
   7646 	return error;
   7647 }
   7648 
   7649 /*
   7650  * wm_tx_offload:
   7651  *
   7652  *	Set up TCP/IP checksumming parameters for the
   7653  *	specified packet.
   7654  */
   7655 static void
   7656 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7657     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7658 {
   7659 	struct mbuf *m0 = txs->txs_mbuf;
   7660 	struct livengood_tcpip_ctxdesc *t;
   7661 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7662 	uint32_t ipcse;
   7663 	struct ether_header *eh;
   7664 	int offset, iphl;
   7665 	uint8_t fields;
   7666 
   7667 	/*
   7668 	 * XXX It would be nice if the mbuf pkthdr had offset
   7669 	 * fields for the protocol headers.
   7670 	 */
   7671 
   7672 	eh = mtod(m0, struct ether_header *);
   7673 	switch (htons(eh->ether_type)) {
   7674 	case ETHERTYPE_IP:
   7675 	case ETHERTYPE_IPV6:
   7676 		offset = ETHER_HDR_LEN;
   7677 		break;
   7678 
   7679 	case ETHERTYPE_VLAN:
   7680 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7681 		break;
   7682 
   7683 	default:
   7684 		/* Don't support this protocol or encapsulation. */
   7685 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7686 		txq->txq_last_hw_ipcs = 0;
   7687 		txq->txq_last_hw_tucs = 0;
   7688 		*fieldsp = 0;
   7689 		*cmdp = 0;
   7690 		return;
   7691 	}
   7692 
   7693 	if ((m0->m_pkthdr.csum_flags &
   7694 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7695 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7696 	} else
   7697 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7698 
   7699 	ipcse = offset + iphl - 1;
   7700 
   7701 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7702 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7703 	seg = 0;
   7704 	fields = 0;
   7705 
   7706 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7707 		int hlen = offset + iphl;
   7708 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7709 
   7710 		if (__predict_false(m0->m_len <
   7711 				    (hlen + sizeof(struct tcphdr)))) {
   7712 			/*
   7713 			 * TCP/IP headers are not in the first mbuf; we need
   7714 			 * to do this the slow and painful way. Let's just
   7715 			 * hope this doesn't happen very often.
   7716 			 */
   7717 			struct tcphdr th;
   7718 
   7719 			WM_Q_EVCNT_INCR(txq, tsopain);
   7720 
   7721 			m_copydata(m0, hlen, sizeof(th), &th);
   7722 			if (v4) {
   7723 				struct ip ip;
   7724 
   7725 				m_copydata(m0, offset, sizeof(ip), &ip);
   7726 				ip.ip_len = 0;
   7727 				m_copyback(m0,
   7728 				    offset + offsetof(struct ip, ip_len),
   7729 				    sizeof(ip.ip_len), &ip.ip_len);
   7730 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7731 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7732 			} else {
   7733 				struct ip6_hdr ip6;
   7734 
   7735 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7736 				ip6.ip6_plen = 0;
   7737 				m_copyback(m0,
   7738 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7739 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7740 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7741 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7742 			}
   7743 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7744 			    sizeof(th.th_sum), &th.th_sum);
   7745 
   7746 			hlen += th.th_off << 2;
   7747 		} else {
   7748 			/*
   7749 			 * TCP/IP headers are in the first mbuf; we can do
   7750 			 * this the easy way.
   7751 			 */
   7752 			struct tcphdr *th;
   7753 
   7754 			if (v4) {
   7755 				struct ip *ip =
   7756 				    (void *)(mtod(m0, char *) + offset);
   7757 				th = (void *)(mtod(m0, char *) + hlen);
   7758 
   7759 				ip->ip_len = 0;
   7760 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7761 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7762 			} else {
   7763 				struct ip6_hdr *ip6 =
   7764 				    (void *)(mtod(m0, char *) + offset);
   7765 				th = (void *)(mtod(m0, char *) + hlen);
   7766 
   7767 				ip6->ip6_plen = 0;
   7768 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7769 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7770 			}
   7771 			hlen += th->th_off << 2;
   7772 		}
   7773 
   7774 		if (v4) {
   7775 			WM_Q_EVCNT_INCR(txq, tso);
   7776 			cmdlen |= WTX_TCPIP_CMD_IP;
   7777 		} else {
   7778 			WM_Q_EVCNT_INCR(txq, tso6);
   7779 			ipcse = 0;
   7780 		}
   7781 		cmd |= WTX_TCPIP_CMD_TSE;
   7782 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7783 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7784 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7785 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7786 	}
   7787 
   7788 	/*
   7789 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7790 	 * offload feature, if we load the context descriptor, we
   7791 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7792 	 */
   7793 
   7794 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7795 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7796 	    WTX_TCPIP_IPCSE(ipcse);
   7797 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7798 		WM_Q_EVCNT_INCR(txq, ipsum);
   7799 		fields |= WTX_IXSM;
   7800 	}
   7801 
   7802 	offset += iphl;
   7803 
   7804 	if (m0->m_pkthdr.csum_flags &
   7805 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7806 		WM_Q_EVCNT_INCR(txq, tusum);
   7807 		fields |= WTX_TXSM;
   7808 		tucs = WTX_TCPIP_TUCSS(offset) |
   7809 		    WTX_TCPIP_TUCSO(offset +
   7810 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7811 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7812 	} else if ((m0->m_pkthdr.csum_flags &
   7813 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7814 		WM_Q_EVCNT_INCR(txq, tusum6);
   7815 		fields |= WTX_TXSM;
   7816 		tucs = WTX_TCPIP_TUCSS(offset) |
   7817 		    WTX_TCPIP_TUCSO(offset +
   7818 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7819 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7820 	} else {
   7821 		/* Just initialize it to a valid TCP context. */
   7822 		tucs = WTX_TCPIP_TUCSS(offset) |
   7823 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7824 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7825 	}
   7826 
   7827 	*cmdp = cmd;
   7828 	*fieldsp = fields;
   7829 
   7830 	/*
   7831 	 * We don't have to write context descriptor for every packet
   7832 	 * except for 82574. For 82574, we must write context descriptor
   7833 	 * for every packet when we use two descriptor queues.
   7834 	 *
   7835 	 * The 82574L can only remember the *last* context used
   7836 	 * regardless of queue that it was use for.  We cannot reuse
   7837 	 * contexts on this hardware platform and must generate a new
   7838 	 * context every time.  82574L hardware spec, section 7.2.6,
   7839 	 * second note.
   7840 	 */
   7841 	if (sc->sc_nqueues < 2) {
   7842 		/*
   7843 		 * Setting up new checksum offload context for every
   7844 		 * frames takes a lot of processing time for hardware.
   7845 		 * This also reduces performance a lot for small sized
   7846 		 * frames so avoid it if driver can use previously
   7847 		 * configured checksum offload context.
   7848 		 * For TSO, in theory we can use the same TSO context only if
   7849 		 * frame is the same type(IP/TCP) and the same MSS. However
   7850 		 * checking whether a frame has the same IP/TCP structure is
   7851 		 * hard thing so just ignore that and always restablish a
   7852 		 * new TSO context.
   7853 		 */
   7854 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7855 		    == 0) {
   7856 			if (txq->txq_last_hw_cmd == cmd &&
   7857 			    txq->txq_last_hw_fields == fields &&
   7858 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7859 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7860 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7861 				return;
   7862 			}
   7863 		}
   7864 
   7865 		txq->txq_last_hw_cmd = cmd;
   7866 		txq->txq_last_hw_fields = fields;
   7867 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7868 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7869 	}
   7870 
   7871 	/* Fill in the context descriptor. */
   7872 	t = (struct livengood_tcpip_ctxdesc *)
   7873 	    &txq->txq_descs[txq->txq_next];
   7874 	t->tcpip_ipcs = htole32(ipcs);
   7875 	t->tcpip_tucs = htole32(tucs);
   7876 	t->tcpip_cmdlen = htole32(cmdlen);
   7877 	t->tcpip_seg = htole32(seg);
   7878 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7879 
   7880 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7881 	txs->txs_ndesc++;
   7882 }
   7883 
   7884 static inline int
   7885 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7886 {
   7887 	struct wm_softc *sc = ifp->if_softc;
   7888 	u_int cpuid = cpu_index(curcpu());
   7889 
   7890 	/*
   7891 	 * Currently, simple distribute strategy.
   7892 	 * TODO:
   7893 	 * distribute by flowid(RSS has value).
   7894 	 */
   7895 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7896 }
   7897 
   7898 static inline bool
   7899 wm_linkdown_discard(struct wm_txqueue *txq)
   7900 {
   7901 
   7902 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7903 		return true;
   7904 
   7905 	return false;
   7906 }
   7907 
   7908 /*
   7909  * wm_start:		[ifnet interface function]
   7910  *
   7911  *	Start packet transmission on the interface.
   7912  */
   7913 static void
   7914 wm_start(struct ifnet *ifp)
   7915 {
   7916 	struct wm_softc *sc = ifp->if_softc;
   7917 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7918 
   7919 #ifdef WM_MPSAFE
   7920 	KASSERT(if_is_mpsafe(ifp));
   7921 #endif
   7922 	/*
   7923 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7924 	 */
   7925 
   7926 	mutex_enter(txq->txq_lock);
   7927 	if (!txq->txq_stopping)
   7928 		wm_start_locked(ifp);
   7929 	mutex_exit(txq->txq_lock);
   7930 }
   7931 
   7932 static void
   7933 wm_start_locked(struct ifnet *ifp)
   7934 {
   7935 	struct wm_softc *sc = ifp->if_softc;
   7936 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7937 
   7938 	wm_send_common_locked(ifp, txq, false);
   7939 }
   7940 
   7941 static int
   7942 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7943 {
   7944 	int qid;
   7945 	struct wm_softc *sc = ifp->if_softc;
   7946 	struct wm_txqueue *txq;
   7947 
   7948 	qid = wm_select_txqueue(ifp, m);
   7949 	txq = &sc->sc_queue[qid].wmq_txq;
   7950 
   7951 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7952 		m_freem(m);
   7953 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7954 		return ENOBUFS;
   7955 	}
   7956 
   7957 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7958 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7959 	if (m->m_flags & M_MCAST)
   7960 		if_statinc_ref(nsr, if_omcasts);
   7961 	IF_STAT_PUTREF(ifp);
   7962 
   7963 	if (mutex_tryenter(txq->txq_lock)) {
   7964 		if (!txq->txq_stopping)
   7965 			wm_transmit_locked(ifp, txq);
   7966 		mutex_exit(txq->txq_lock);
   7967 	}
   7968 
   7969 	return 0;
   7970 }
   7971 
   7972 static void
   7973 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7974 {
   7975 
   7976 	wm_send_common_locked(ifp, txq, true);
   7977 }
   7978 
   7979 static void
   7980 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7981     bool is_transmit)
   7982 {
   7983 	struct wm_softc *sc = ifp->if_softc;
   7984 	struct mbuf *m0;
   7985 	struct wm_txsoft *txs;
   7986 	bus_dmamap_t dmamap;
   7987 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7988 	bus_addr_t curaddr;
   7989 	bus_size_t seglen, curlen;
   7990 	uint32_t cksumcmd;
   7991 	uint8_t cksumfields;
   7992 	bool remap = true;
   7993 
   7994 	KASSERT(mutex_owned(txq->txq_lock));
   7995 
   7996 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7997 		return;
   7998 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7999 		return;
   8000 
   8001 	if (__predict_false(wm_linkdown_discard(txq))) {
   8002 		do {
   8003 			if (is_transmit)
   8004 				m0 = pcq_get(txq->txq_interq);
   8005 			else
   8006 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8007 			/*
   8008 			 * increment successed packet counter as in the case
   8009 			 * which the packet is discarded by link down PHY.
   8010 			 */
   8011 			if (m0 != NULL)
   8012 				if_statinc(ifp, if_opackets);
   8013 			m_freem(m0);
   8014 		} while (m0 != NULL);
   8015 		return;
   8016 	}
   8017 
   8018 	/* Remember the previous number of free descriptors. */
   8019 	ofree = txq->txq_free;
   8020 
   8021 	/*
   8022 	 * Loop through the send queue, setting up transmit descriptors
   8023 	 * until we drain the queue, or use up all available transmit
   8024 	 * descriptors.
   8025 	 */
   8026 	for (;;) {
   8027 		m0 = NULL;
   8028 
   8029 		/* Get a work queue entry. */
   8030 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8031 			wm_txeof(txq, UINT_MAX);
   8032 			if (txq->txq_sfree == 0) {
   8033 				DPRINTF(sc, WM_DEBUG_TX,
   8034 				    ("%s: TX: no free job descriptors\n",
   8035 					device_xname(sc->sc_dev)));
   8036 				WM_Q_EVCNT_INCR(txq, txsstall);
   8037 				break;
   8038 			}
   8039 		}
   8040 
   8041 		/* Grab a packet off the queue. */
   8042 		if (is_transmit)
   8043 			m0 = pcq_get(txq->txq_interq);
   8044 		else
   8045 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8046 		if (m0 == NULL)
   8047 			break;
   8048 
   8049 		DPRINTF(sc, WM_DEBUG_TX,
   8050 		    ("%s: TX: have packet to transmit: %p\n",
   8051 			device_xname(sc->sc_dev), m0));
   8052 
   8053 		txs = &txq->txq_soft[txq->txq_snext];
   8054 		dmamap = txs->txs_dmamap;
   8055 
   8056 		use_tso = (m0->m_pkthdr.csum_flags &
   8057 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8058 
   8059 		/*
   8060 		 * So says the Linux driver:
   8061 		 * The controller does a simple calculation to make sure
   8062 		 * there is enough room in the FIFO before initiating the
   8063 		 * DMA for each buffer. The calc is:
   8064 		 *	4 = ceil(buffer len / MSS)
   8065 		 * To make sure we don't overrun the FIFO, adjust the max
   8066 		 * buffer len if the MSS drops.
   8067 		 */
   8068 		dmamap->dm_maxsegsz =
   8069 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8070 		    ? m0->m_pkthdr.segsz << 2
   8071 		    : WTX_MAX_LEN;
   8072 
   8073 		/*
   8074 		 * Load the DMA map.  If this fails, the packet either
   8075 		 * didn't fit in the allotted number of segments, or we
   8076 		 * were short on resources.  For the too-many-segments
   8077 		 * case, we simply report an error and drop the packet,
   8078 		 * since we can't sanely copy a jumbo packet to a single
   8079 		 * buffer.
   8080 		 */
   8081 retry:
   8082 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8083 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8084 		if (__predict_false(error)) {
   8085 			if (error == EFBIG) {
   8086 				if (remap == true) {
   8087 					struct mbuf *m;
   8088 
   8089 					remap = false;
   8090 					m = m_defrag(m0, M_NOWAIT);
   8091 					if (m != NULL) {
   8092 						WM_Q_EVCNT_INCR(txq, defrag);
   8093 						m0 = m;
   8094 						goto retry;
   8095 					}
   8096 				}
   8097 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8098 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8099 				    "DMA segments, dropping...\n",
   8100 				    device_xname(sc->sc_dev));
   8101 				wm_dump_mbuf_chain(sc, m0);
   8102 				m_freem(m0);
   8103 				continue;
   8104 			}
   8105 			/* Short on resources, just stop for now. */
   8106 			DPRINTF(sc, WM_DEBUG_TX,
   8107 			    ("%s: TX: dmamap load failed: %d\n",
   8108 				device_xname(sc->sc_dev), error));
   8109 			break;
   8110 		}
   8111 
   8112 		segs_needed = dmamap->dm_nsegs;
   8113 		if (use_tso) {
   8114 			/* For sentinel descriptor; see below. */
   8115 			segs_needed++;
   8116 		}
   8117 
   8118 		/*
   8119 		 * Ensure we have enough descriptors free to describe
   8120 		 * the packet. Note, we always reserve one descriptor
   8121 		 * at the end of the ring due to the semantics of the
   8122 		 * TDT register, plus one more in the event we need
   8123 		 * to load offload context.
   8124 		 */
   8125 		if (segs_needed > txq->txq_free - 2) {
   8126 			/*
   8127 			 * Not enough free descriptors to transmit this
   8128 			 * packet.  We haven't committed anything yet,
   8129 			 * so just unload the DMA map, put the packet
   8130 			 * pack on the queue, and punt. Notify the upper
   8131 			 * layer that there are no more slots left.
   8132 			 */
   8133 			DPRINTF(sc, WM_DEBUG_TX,
   8134 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8135 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8136 				segs_needed, txq->txq_free - 1));
   8137 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8138 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8139 			WM_Q_EVCNT_INCR(txq, txdstall);
   8140 			break;
   8141 		}
   8142 
   8143 		/*
   8144 		 * Check for 82547 Tx FIFO bug. We need to do this
   8145 		 * once we know we can transmit the packet, since we
   8146 		 * do some internal FIFO space accounting here.
   8147 		 */
   8148 		if (sc->sc_type == WM_T_82547 &&
   8149 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8150 			DPRINTF(sc, WM_DEBUG_TX,
   8151 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8152 				device_xname(sc->sc_dev)));
   8153 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8154 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8155 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8156 			break;
   8157 		}
   8158 
   8159 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8160 
   8161 		DPRINTF(sc, WM_DEBUG_TX,
   8162 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8163 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8164 
   8165 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8166 
   8167 		/*
   8168 		 * Store a pointer to the packet so that we can free it
   8169 		 * later.
   8170 		 *
   8171 		 * Initially, we consider the number of descriptors the
   8172 		 * packet uses the number of DMA segments.  This may be
   8173 		 * incremented by 1 if we do checksum offload (a descriptor
   8174 		 * is used to set the checksum context).
   8175 		 */
   8176 		txs->txs_mbuf = m0;
   8177 		txs->txs_firstdesc = txq->txq_next;
   8178 		txs->txs_ndesc = segs_needed;
   8179 
   8180 		/* Set up offload parameters for this packet. */
   8181 		if (m0->m_pkthdr.csum_flags &
   8182 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8183 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8184 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8185 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8186 		} else {
   8187 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8188 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8189 			cksumcmd = 0;
   8190 			cksumfields = 0;
   8191 		}
   8192 
   8193 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8194 
   8195 		/* Sync the DMA map. */
   8196 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8197 		    BUS_DMASYNC_PREWRITE);
   8198 
   8199 		/* Initialize the transmit descriptor. */
   8200 		for (nexttx = txq->txq_next, seg = 0;
   8201 		     seg < dmamap->dm_nsegs; seg++) {
   8202 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8203 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8204 			     seglen != 0;
   8205 			     curaddr += curlen, seglen -= curlen,
   8206 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8207 				curlen = seglen;
   8208 
   8209 				/*
   8210 				 * So says the Linux driver:
   8211 				 * Work around for premature descriptor
   8212 				 * write-backs in TSO mode.  Append a
   8213 				 * 4-byte sentinel descriptor.
   8214 				 */
   8215 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8216 				    curlen > 8)
   8217 					curlen -= 4;
   8218 
   8219 				wm_set_dma_addr(
   8220 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8221 				txq->txq_descs[nexttx].wtx_cmdlen
   8222 				    = htole32(cksumcmd | curlen);
   8223 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8224 				    = 0;
   8225 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8226 				    = cksumfields;
   8227 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8228 				lasttx = nexttx;
   8229 
   8230 				DPRINTF(sc, WM_DEBUG_TX,
   8231 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8232 					"len %#04zx\n",
   8233 					device_xname(sc->sc_dev), nexttx,
   8234 					(uint64_t)curaddr, curlen));
   8235 			}
   8236 		}
   8237 
   8238 		KASSERT(lasttx != -1);
   8239 
   8240 		/*
   8241 		 * Set up the command byte on the last descriptor of
   8242 		 * the packet. If we're in the interrupt delay window,
   8243 		 * delay the interrupt.
   8244 		 */
   8245 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8246 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8247 
   8248 		/*
   8249 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8250 		 * up the descriptor to encapsulate the packet for us.
   8251 		 *
   8252 		 * This is only valid on the last descriptor of the packet.
   8253 		 */
   8254 		if (vlan_has_tag(m0)) {
   8255 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8256 			    htole32(WTX_CMD_VLE);
   8257 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8258 			    = htole16(vlan_get_tag(m0));
   8259 		}
   8260 
   8261 		txs->txs_lastdesc = lasttx;
   8262 
   8263 		DPRINTF(sc, WM_DEBUG_TX,
   8264 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8265 			device_xname(sc->sc_dev),
   8266 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8267 
   8268 		/* Sync the descriptors we're using. */
   8269 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8270 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8271 
   8272 		/* Give the packet to the chip. */
   8273 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8274 
   8275 		DPRINTF(sc, WM_DEBUG_TX,
   8276 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8277 
   8278 		DPRINTF(sc, WM_DEBUG_TX,
   8279 		    ("%s: TX: finished transmitting packet, job %d\n",
   8280 			device_xname(sc->sc_dev), txq->txq_snext));
   8281 
   8282 		/* Advance the tx pointer. */
   8283 		txq->txq_free -= txs->txs_ndesc;
   8284 		txq->txq_next = nexttx;
   8285 
   8286 		txq->txq_sfree--;
   8287 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8288 
   8289 		/* Pass the packet to any BPF listeners. */
   8290 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8291 	}
   8292 
   8293 	if (m0 != NULL) {
   8294 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8295 		WM_Q_EVCNT_INCR(txq, descdrop);
   8296 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8297 			__func__));
   8298 		m_freem(m0);
   8299 	}
   8300 
   8301 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8302 		/* No more slots; notify upper layer. */
   8303 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8304 	}
   8305 
   8306 	if (txq->txq_free != ofree) {
   8307 		/* Set a watchdog timer in case the chip flakes out. */
   8308 		txq->txq_lastsent = time_uptime;
   8309 		txq->txq_sending = true;
   8310 	}
   8311 }
   8312 
   8313 /*
   8314  * wm_nq_tx_offload:
   8315  *
   8316  *	Set up TCP/IP checksumming parameters for the
   8317  *	specified packet, for NEWQUEUE devices
   8318  */
   8319 static void
   8320 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8321     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8322 {
   8323 	struct mbuf *m0 = txs->txs_mbuf;
   8324 	uint32_t vl_len, mssidx, cmdc;
   8325 	struct ether_header *eh;
   8326 	int offset, iphl;
   8327 
   8328 	/*
   8329 	 * XXX It would be nice if the mbuf pkthdr had offset
   8330 	 * fields for the protocol headers.
   8331 	 */
   8332 	*cmdlenp = 0;
   8333 	*fieldsp = 0;
   8334 
   8335 	eh = mtod(m0, struct ether_header *);
   8336 	switch (htons(eh->ether_type)) {
   8337 	case ETHERTYPE_IP:
   8338 	case ETHERTYPE_IPV6:
   8339 		offset = ETHER_HDR_LEN;
   8340 		break;
   8341 
   8342 	case ETHERTYPE_VLAN:
   8343 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8344 		break;
   8345 
   8346 	default:
   8347 		/* Don't support this protocol or encapsulation. */
   8348 		*do_csum = false;
   8349 		return;
   8350 	}
   8351 	*do_csum = true;
   8352 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8353 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8354 
   8355 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8356 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8357 
   8358 	if ((m0->m_pkthdr.csum_flags &
   8359 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8360 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8361 	} else {
   8362 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8363 	}
   8364 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8365 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8366 
   8367 	if (vlan_has_tag(m0)) {
   8368 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8369 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8370 		*cmdlenp |= NQTX_CMD_VLE;
   8371 	}
   8372 
   8373 	mssidx = 0;
   8374 
   8375 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8376 		int hlen = offset + iphl;
   8377 		int tcp_hlen;
   8378 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8379 
   8380 		if (__predict_false(m0->m_len <
   8381 				    (hlen + sizeof(struct tcphdr)))) {
   8382 			/*
   8383 			 * TCP/IP headers are not in the first mbuf; we need
   8384 			 * to do this the slow and painful way. Let's just
   8385 			 * hope this doesn't happen very often.
   8386 			 */
   8387 			struct tcphdr th;
   8388 
   8389 			WM_Q_EVCNT_INCR(txq, tsopain);
   8390 
   8391 			m_copydata(m0, hlen, sizeof(th), &th);
   8392 			if (v4) {
   8393 				struct ip ip;
   8394 
   8395 				m_copydata(m0, offset, sizeof(ip), &ip);
   8396 				ip.ip_len = 0;
   8397 				m_copyback(m0,
   8398 				    offset + offsetof(struct ip, ip_len),
   8399 				    sizeof(ip.ip_len), &ip.ip_len);
   8400 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8401 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8402 			} else {
   8403 				struct ip6_hdr ip6;
   8404 
   8405 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8406 				ip6.ip6_plen = 0;
   8407 				m_copyback(m0,
   8408 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8409 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8410 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8411 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8412 			}
   8413 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8414 			    sizeof(th.th_sum), &th.th_sum);
   8415 
   8416 			tcp_hlen = th.th_off << 2;
   8417 		} else {
   8418 			/*
   8419 			 * TCP/IP headers are in the first mbuf; we can do
   8420 			 * this the easy way.
   8421 			 */
   8422 			struct tcphdr *th;
   8423 
   8424 			if (v4) {
   8425 				struct ip *ip =
   8426 				    (void *)(mtod(m0, char *) + offset);
   8427 				th = (void *)(mtod(m0, char *) + hlen);
   8428 
   8429 				ip->ip_len = 0;
   8430 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8431 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8432 			} else {
   8433 				struct ip6_hdr *ip6 =
   8434 				    (void *)(mtod(m0, char *) + offset);
   8435 				th = (void *)(mtod(m0, char *) + hlen);
   8436 
   8437 				ip6->ip6_plen = 0;
   8438 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8439 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8440 			}
   8441 			tcp_hlen = th->th_off << 2;
   8442 		}
   8443 		hlen += tcp_hlen;
   8444 		*cmdlenp |= NQTX_CMD_TSE;
   8445 
   8446 		if (v4) {
   8447 			WM_Q_EVCNT_INCR(txq, tso);
   8448 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8449 		} else {
   8450 			WM_Q_EVCNT_INCR(txq, tso6);
   8451 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8452 		}
   8453 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8454 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8455 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8456 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8457 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8458 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8459 	} else {
   8460 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8461 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8462 	}
   8463 
   8464 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8465 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8466 		cmdc |= NQTXC_CMD_IP4;
   8467 	}
   8468 
   8469 	if (m0->m_pkthdr.csum_flags &
   8470 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8471 		WM_Q_EVCNT_INCR(txq, tusum);
   8472 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8473 			cmdc |= NQTXC_CMD_TCP;
   8474 		else
   8475 			cmdc |= NQTXC_CMD_UDP;
   8476 
   8477 		cmdc |= NQTXC_CMD_IP4;
   8478 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8479 	}
   8480 	if (m0->m_pkthdr.csum_flags &
   8481 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8482 		WM_Q_EVCNT_INCR(txq, tusum6);
   8483 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8484 			cmdc |= NQTXC_CMD_TCP;
   8485 		else
   8486 			cmdc |= NQTXC_CMD_UDP;
   8487 
   8488 		cmdc |= NQTXC_CMD_IP6;
   8489 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8490 	}
   8491 
   8492 	/*
   8493 	 * We don't have to write context descriptor for every packet to
   8494 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   8495 	 * I210 and I211. It is enough to write once per a Tx queue for these
   8496 	 * controllers.
   8497 	 * It would be overhead to write context descriptor for every packet,
   8498 	 * however it does not cause problems.
   8499 	 */
   8500 	/* Fill in the context descriptor. */
   8501 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8502 	    htole32(vl_len);
   8503 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8504 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8505 	    htole32(cmdc);
   8506 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8507 	    htole32(mssidx);
   8508 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8509 	DPRINTF(sc, WM_DEBUG_TX,
   8510 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8511 		txq->txq_next, 0, vl_len));
   8512 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8513 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8514 	txs->txs_ndesc++;
   8515 }
   8516 
   8517 /*
   8518  * wm_nq_start:		[ifnet interface function]
   8519  *
   8520  *	Start packet transmission on the interface for NEWQUEUE devices
   8521  */
   8522 static void
   8523 wm_nq_start(struct ifnet *ifp)
   8524 {
   8525 	struct wm_softc *sc = ifp->if_softc;
   8526 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8527 
   8528 #ifdef WM_MPSAFE
   8529 	KASSERT(if_is_mpsafe(ifp));
   8530 #endif
   8531 	/*
   8532 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8533 	 */
   8534 
   8535 	mutex_enter(txq->txq_lock);
   8536 	if (!txq->txq_stopping)
   8537 		wm_nq_start_locked(ifp);
   8538 	mutex_exit(txq->txq_lock);
   8539 }
   8540 
   8541 static void
   8542 wm_nq_start_locked(struct ifnet *ifp)
   8543 {
   8544 	struct wm_softc *sc = ifp->if_softc;
   8545 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8546 
   8547 	wm_nq_send_common_locked(ifp, txq, false);
   8548 }
   8549 
   8550 static int
   8551 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8552 {
   8553 	int qid;
   8554 	struct wm_softc *sc = ifp->if_softc;
   8555 	struct wm_txqueue *txq;
   8556 
   8557 	qid = wm_select_txqueue(ifp, m);
   8558 	txq = &sc->sc_queue[qid].wmq_txq;
   8559 
   8560 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8561 		m_freem(m);
   8562 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8563 		return ENOBUFS;
   8564 	}
   8565 
   8566 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8567 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8568 	if (m->m_flags & M_MCAST)
   8569 		if_statinc_ref(nsr, if_omcasts);
   8570 	IF_STAT_PUTREF(ifp);
   8571 
   8572 	/*
   8573 	 * The situations which this mutex_tryenter() fails at running time
   8574 	 * are below two patterns.
   8575 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8576 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8577 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8578 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8579 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8580 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8581 	 * stuck, either.
   8582 	 */
   8583 	if (mutex_tryenter(txq->txq_lock)) {
   8584 		if (!txq->txq_stopping)
   8585 			wm_nq_transmit_locked(ifp, txq);
   8586 		mutex_exit(txq->txq_lock);
   8587 	}
   8588 
   8589 	return 0;
   8590 }
   8591 
   8592 static void
   8593 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8594 {
   8595 
   8596 	wm_nq_send_common_locked(ifp, txq, true);
   8597 }
   8598 
   8599 static void
   8600 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8601     bool is_transmit)
   8602 {
   8603 	struct wm_softc *sc = ifp->if_softc;
   8604 	struct mbuf *m0;
   8605 	struct wm_txsoft *txs;
   8606 	bus_dmamap_t dmamap;
   8607 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8608 	bool do_csum, sent;
   8609 	bool remap = true;
   8610 
   8611 	KASSERT(mutex_owned(txq->txq_lock));
   8612 
   8613 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8614 		return;
   8615 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8616 		return;
   8617 
   8618 	if (__predict_false(wm_linkdown_discard(txq))) {
   8619 		do {
   8620 			if (is_transmit)
   8621 				m0 = pcq_get(txq->txq_interq);
   8622 			else
   8623 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8624 			/*
   8625 			 * increment successed packet counter as in the case
   8626 			 * which the packet is discarded by link down PHY.
   8627 			 */
   8628 			if (m0 != NULL)
   8629 				if_statinc(ifp, if_opackets);
   8630 			m_freem(m0);
   8631 		} while (m0 != NULL);
   8632 		return;
   8633 	}
   8634 
   8635 	sent = false;
   8636 
   8637 	/*
   8638 	 * Loop through the send queue, setting up transmit descriptors
   8639 	 * until we drain the queue, or use up all available transmit
   8640 	 * descriptors.
   8641 	 */
   8642 	for (;;) {
   8643 		m0 = NULL;
   8644 
   8645 		/* Get a work queue entry. */
   8646 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8647 			wm_txeof(txq, UINT_MAX);
   8648 			if (txq->txq_sfree == 0) {
   8649 				DPRINTF(sc, WM_DEBUG_TX,
   8650 				    ("%s: TX: no free job descriptors\n",
   8651 					device_xname(sc->sc_dev)));
   8652 				WM_Q_EVCNT_INCR(txq, txsstall);
   8653 				break;
   8654 			}
   8655 		}
   8656 
   8657 		/* Grab a packet off the queue. */
   8658 		if (is_transmit)
   8659 			m0 = pcq_get(txq->txq_interq);
   8660 		else
   8661 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8662 		if (m0 == NULL)
   8663 			break;
   8664 
   8665 		DPRINTF(sc, WM_DEBUG_TX,
   8666 		    ("%s: TX: have packet to transmit: %p\n",
   8667 		    device_xname(sc->sc_dev), m0));
   8668 
   8669 		txs = &txq->txq_soft[txq->txq_snext];
   8670 		dmamap = txs->txs_dmamap;
   8671 
   8672 		/*
   8673 		 * Load the DMA map.  If this fails, the packet either
   8674 		 * didn't fit in the allotted number of segments, or we
   8675 		 * were short on resources.  For the too-many-segments
   8676 		 * case, we simply report an error and drop the packet,
   8677 		 * since we can't sanely copy a jumbo packet to a single
   8678 		 * buffer.
   8679 		 */
   8680 retry:
   8681 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8682 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8683 		if (__predict_false(error)) {
   8684 			if (error == EFBIG) {
   8685 				if (remap == true) {
   8686 					struct mbuf *m;
   8687 
   8688 					remap = false;
   8689 					m = m_defrag(m0, M_NOWAIT);
   8690 					if (m != NULL) {
   8691 						WM_Q_EVCNT_INCR(txq, defrag);
   8692 						m0 = m;
   8693 						goto retry;
   8694 					}
   8695 				}
   8696 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8697 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8698 				    "DMA segments, dropping...\n",
   8699 				    device_xname(sc->sc_dev));
   8700 				wm_dump_mbuf_chain(sc, m0);
   8701 				m_freem(m0);
   8702 				continue;
   8703 			}
   8704 			/* Short on resources, just stop for now. */
   8705 			DPRINTF(sc, WM_DEBUG_TX,
   8706 			    ("%s: TX: dmamap load failed: %d\n",
   8707 				device_xname(sc->sc_dev), error));
   8708 			break;
   8709 		}
   8710 
   8711 		segs_needed = dmamap->dm_nsegs;
   8712 
   8713 		/*
   8714 		 * Ensure we have enough descriptors free to describe
   8715 		 * the packet. Note, we always reserve one descriptor
   8716 		 * at the end of the ring due to the semantics of the
   8717 		 * TDT register, plus one more in the event we need
   8718 		 * to load offload context.
   8719 		 */
   8720 		if (segs_needed > txq->txq_free - 2) {
   8721 			/*
   8722 			 * Not enough free descriptors to transmit this
   8723 			 * packet.  We haven't committed anything yet,
   8724 			 * so just unload the DMA map, put the packet
   8725 			 * pack on the queue, and punt. Notify the upper
   8726 			 * layer that there are no more slots left.
   8727 			 */
   8728 			DPRINTF(sc, WM_DEBUG_TX,
   8729 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8730 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8731 				segs_needed, txq->txq_free - 1));
   8732 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8733 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8734 			WM_Q_EVCNT_INCR(txq, txdstall);
   8735 			break;
   8736 		}
   8737 
   8738 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8739 
   8740 		DPRINTF(sc, WM_DEBUG_TX,
   8741 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8742 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8743 
   8744 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8745 
   8746 		/*
   8747 		 * Store a pointer to the packet so that we can free it
   8748 		 * later.
   8749 		 *
   8750 		 * Initially, we consider the number of descriptors the
   8751 		 * packet uses the number of DMA segments.  This may be
   8752 		 * incremented by 1 if we do checksum offload (a descriptor
   8753 		 * is used to set the checksum context).
   8754 		 */
   8755 		txs->txs_mbuf = m0;
   8756 		txs->txs_firstdesc = txq->txq_next;
   8757 		txs->txs_ndesc = segs_needed;
   8758 
   8759 		/* Set up offload parameters for this packet. */
   8760 		uint32_t cmdlen, fields, dcmdlen;
   8761 		if (m0->m_pkthdr.csum_flags &
   8762 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8763 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8764 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8765 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8766 			    &do_csum);
   8767 		} else {
   8768 			do_csum = false;
   8769 			cmdlen = 0;
   8770 			fields = 0;
   8771 		}
   8772 
   8773 		/* Sync the DMA map. */
   8774 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8775 		    BUS_DMASYNC_PREWRITE);
   8776 
   8777 		/* Initialize the first transmit descriptor. */
   8778 		nexttx = txq->txq_next;
   8779 		if (!do_csum) {
   8780 			/* Setup a legacy descriptor */
   8781 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8782 			    dmamap->dm_segs[0].ds_addr);
   8783 			txq->txq_descs[nexttx].wtx_cmdlen =
   8784 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8785 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8786 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8787 			if (vlan_has_tag(m0)) {
   8788 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8789 				    htole32(WTX_CMD_VLE);
   8790 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8791 				    htole16(vlan_get_tag(m0));
   8792 			} else
   8793 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8794 
   8795 			dcmdlen = 0;
   8796 		} else {
   8797 			/* Setup an advanced data descriptor */
   8798 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8799 			    htole64(dmamap->dm_segs[0].ds_addr);
   8800 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8801 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8802 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8803 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8804 			    htole32(fields);
   8805 			DPRINTF(sc, WM_DEBUG_TX,
   8806 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8807 				device_xname(sc->sc_dev), nexttx,
   8808 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8809 			DPRINTF(sc, WM_DEBUG_TX,
   8810 			    ("\t 0x%08x%08x\n", fields,
   8811 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8812 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8813 		}
   8814 
   8815 		lasttx = nexttx;
   8816 		nexttx = WM_NEXTTX(txq, nexttx);
   8817 		/*
   8818 		 * Fill in the next descriptors. legacy or advanced format
   8819 		 * is the same here
   8820 		 */
   8821 		for (seg = 1; seg < dmamap->dm_nsegs;
   8822 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8823 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8824 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8825 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8826 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8827 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8828 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8829 			lasttx = nexttx;
   8830 
   8831 			DPRINTF(sc, WM_DEBUG_TX,
   8832 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8833 				device_xname(sc->sc_dev), nexttx,
   8834 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8835 				dmamap->dm_segs[seg].ds_len));
   8836 		}
   8837 
   8838 		KASSERT(lasttx != -1);
   8839 
   8840 		/*
   8841 		 * Set up the command byte on the last descriptor of
   8842 		 * the packet. If we're in the interrupt delay window,
   8843 		 * delay the interrupt.
   8844 		 */
   8845 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8846 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8847 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8848 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8849 
   8850 		txs->txs_lastdesc = lasttx;
   8851 
   8852 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8853 		    device_xname(sc->sc_dev),
   8854 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8855 
   8856 		/* Sync the descriptors we're using. */
   8857 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8858 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8859 
   8860 		/* Give the packet to the chip. */
   8861 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8862 		sent = true;
   8863 
   8864 		DPRINTF(sc, WM_DEBUG_TX,
   8865 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8866 
   8867 		DPRINTF(sc, WM_DEBUG_TX,
   8868 		    ("%s: TX: finished transmitting packet, job %d\n",
   8869 			device_xname(sc->sc_dev), txq->txq_snext));
   8870 
   8871 		/* Advance the tx pointer. */
   8872 		txq->txq_free -= txs->txs_ndesc;
   8873 		txq->txq_next = nexttx;
   8874 
   8875 		txq->txq_sfree--;
   8876 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8877 
   8878 		/* Pass the packet to any BPF listeners. */
   8879 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8880 	}
   8881 
   8882 	if (m0 != NULL) {
   8883 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8884 		WM_Q_EVCNT_INCR(txq, descdrop);
   8885 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8886 			__func__));
   8887 		m_freem(m0);
   8888 	}
   8889 
   8890 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8891 		/* No more slots; notify upper layer. */
   8892 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8893 	}
   8894 
   8895 	if (sent) {
   8896 		/* Set a watchdog timer in case the chip flakes out. */
   8897 		txq->txq_lastsent = time_uptime;
   8898 		txq->txq_sending = true;
   8899 	}
   8900 }
   8901 
   8902 static void
   8903 wm_deferred_start_locked(struct wm_txqueue *txq)
   8904 {
   8905 	struct wm_softc *sc = txq->txq_sc;
   8906 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8907 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8908 	int qid = wmq->wmq_id;
   8909 
   8910 	KASSERT(mutex_owned(txq->txq_lock));
   8911 
   8912 	if (txq->txq_stopping) {
   8913 		mutex_exit(txq->txq_lock);
   8914 		return;
   8915 	}
   8916 
   8917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8918 		/* XXX need for ALTQ or one CPU system */
   8919 		if (qid == 0)
   8920 			wm_nq_start_locked(ifp);
   8921 		wm_nq_transmit_locked(ifp, txq);
   8922 	} else {
   8923 		/* XXX need for ALTQ or one CPU system */
   8924 		if (qid == 0)
   8925 			wm_start_locked(ifp);
   8926 		wm_transmit_locked(ifp, txq);
   8927 	}
   8928 }
   8929 
   8930 /* Interrupt */
   8931 
   8932 /*
   8933  * wm_txeof:
   8934  *
   8935  *	Helper; handle transmit interrupts.
   8936  */
   8937 static bool
   8938 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8939 {
   8940 	struct wm_softc *sc = txq->txq_sc;
   8941 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8942 	struct wm_txsoft *txs;
   8943 	int count = 0;
   8944 	int i;
   8945 	uint8_t status;
   8946 	bool more = false;
   8947 
   8948 	KASSERT(mutex_owned(txq->txq_lock));
   8949 
   8950 	if (txq->txq_stopping)
   8951 		return false;
   8952 
   8953 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8954 
   8955 	/*
   8956 	 * Go through the Tx list and free mbufs for those
   8957 	 * frames which have been transmitted.
   8958 	 */
   8959 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8960 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8961 		if (limit-- == 0) {
   8962 			more = true;
   8963 			DPRINTF(sc, WM_DEBUG_TX,
   8964 			    ("%s: TX: loop limited, job %d is not processed\n",
   8965 				device_xname(sc->sc_dev), i));
   8966 			break;
   8967 		}
   8968 
   8969 		txs = &txq->txq_soft[i];
   8970 
   8971 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8972 			device_xname(sc->sc_dev), i));
   8973 
   8974 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8975 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8976 
   8977 		status =
   8978 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8979 		if ((status & WTX_ST_DD) == 0) {
   8980 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8981 			    BUS_DMASYNC_PREREAD);
   8982 			break;
   8983 		}
   8984 
   8985 		count++;
   8986 		DPRINTF(sc, WM_DEBUG_TX,
   8987 		    ("%s: TX: job %d done: descs %d..%d\n",
   8988 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8989 		    txs->txs_lastdesc));
   8990 
   8991 		/*
   8992 		 * XXX We should probably be using the statistics
   8993 		 * XXX registers, but I don't know if they exist
   8994 		 * XXX on chips before the i82544.
   8995 		 */
   8996 
   8997 #ifdef WM_EVENT_COUNTERS
   8998 		if (status & WTX_ST_TU)
   8999 			WM_Q_EVCNT_INCR(txq, underrun);
   9000 #endif /* WM_EVENT_COUNTERS */
   9001 
   9002 		/*
   9003 		 * 82574 and newer's document says the status field has neither
   9004 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
   9005 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
   9006 		 * Developer's Manual", 82574 datasheet and newer.
   9007 		 *
   9008 		 * XXX I saw the LC bit was set on I218 even though the media
   9009 		 * was full duplex, so the bit might be used for other
   9010 		 * meaning ...(I have no document).
   9011 		 */
   9012 
   9013 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9014 		    && ((sc->sc_type < WM_T_82574)
   9015 			|| (sc->sc_type == WM_T_80003))) {
   9016 			if_statinc(ifp, if_oerrors);
   9017 			if (status & WTX_ST_LC)
   9018 				log(LOG_WARNING, "%s: late collision\n",
   9019 				    device_xname(sc->sc_dev));
   9020 			else if (status & WTX_ST_EC) {
   9021 				if_statadd(ifp, if_collisions,
   9022 				    TX_COLLISION_THRESHOLD + 1);
   9023 				log(LOG_WARNING, "%s: excessive collisions\n",
   9024 				    device_xname(sc->sc_dev));
   9025 			}
   9026 		} else
   9027 			if_statinc(ifp, if_opackets);
   9028 
   9029 		txq->txq_packets++;
   9030 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9031 
   9032 		txq->txq_free += txs->txs_ndesc;
   9033 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9034 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9035 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9036 		m_freem(txs->txs_mbuf);
   9037 		txs->txs_mbuf = NULL;
   9038 	}
   9039 
   9040 	/* Update the dirty transmit buffer pointer. */
   9041 	txq->txq_sdirty = i;
   9042 	DPRINTF(sc, WM_DEBUG_TX,
   9043 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9044 
   9045 	if (count != 0)
   9046 		rnd_add_uint32(&sc->rnd_source, count);
   9047 
   9048 	/*
   9049 	 * If there are no more pending transmissions, cancel the watchdog
   9050 	 * timer.
   9051 	 */
   9052 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9053 		txq->txq_sending = false;
   9054 
   9055 	return more;
   9056 }
   9057 
   9058 static inline uint32_t
   9059 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9060 {
   9061 	struct wm_softc *sc = rxq->rxq_sc;
   9062 
   9063 	if (sc->sc_type == WM_T_82574)
   9064 		return EXTRXC_STATUS(
   9065 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9066 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9067 		return NQRXC_STATUS(
   9068 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9069 	else
   9070 		return rxq->rxq_descs[idx].wrx_status;
   9071 }
   9072 
   9073 static inline uint32_t
   9074 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9075 {
   9076 	struct wm_softc *sc = rxq->rxq_sc;
   9077 
   9078 	if (sc->sc_type == WM_T_82574)
   9079 		return EXTRXC_ERROR(
   9080 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9081 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9082 		return NQRXC_ERROR(
   9083 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9084 	else
   9085 		return rxq->rxq_descs[idx].wrx_errors;
   9086 }
   9087 
   9088 static inline uint16_t
   9089 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9090 {
   9091 	struct wm_softc *sc = rxq->rxq_sc;
   9092 
   9093 	if (sc->sc_type == WM_T_82574)
   9094 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9095 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9096 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9097 	else
   9098 		return rxq->rxq_descs[idx].wrx_special;
   9099 }
   9100 
   9101 static inline int
   9102 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9103 {
   9104 	struct wm_softc *sc = rxq->rxq_sc;
   9105 
   9106 	if (sc->sc_type == WM_T_82574)
   9107 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9108 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9109 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9110 	else
   9111 		return rxq->rxq_descs[idx].wrx_len;
   9112 }
   9113 
   9114 #ifdef WM_DEBUG
   9115 static inline uint32_t
   9116 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9117 {
   9118 	struct wm_softc *sc = rxq->rxq_sc;
   9119 
   9120 	if (sc->sc_type == WM_T_82574)
   9121 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9122 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9123 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9124 	else
   9125 		return 0;
   9126 }
   9127 
   9128 static inline uint8_t
   9129 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9130 {
   9131 	struct wm_softc *sc = rxq->rxq_sc;
   9132 
   9133 	if (sc->sc_type == WM_T_82574)
   9134 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9135 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9136 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9137 	else
   9138 		return 0;
   9139 }
   9140 #endif /* WM_DEBUG */
   9141 
   9142 static inline bool
   9143 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9144     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9145 {
   9146 
   9147 	if (sc->sc_type == WM_T_82574)
   9148 		return (status & ext_bit) != 0;
   9149 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9150 		return (status & nq_bit) != 0;
   9151 	else
   9152 		return (status & legacy_bit) != 0;
   9153 }
   9154 
   9155 static inline bool
   9156 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9157     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9158 {
   9159 
   9160 	if (sc->sc_type == WM_T_82574)
   9161 		return (error & ext_bit) != 0;
   9162 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9163 		return (error & nq_bit) != 0;
   9164 	else
   9165 		return (error & legacy_bit) != 0;
   9166 }
   9167 
   9168 static inline bool
   9169 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9170 {
   9171 
   9172 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9173 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9174 		return true;
   9175 	else
   9176 		return false;
   9177 }
   9178 
   9179 static inline bool
   9180 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9181 {
   9182 	struct wm_softc *sc = rxq->rxq_sc;
   9183 
   9184 	/* XXX missing error bit for newqueue? */
   9185 	if (wm_rxdesc_is_set_error(sc, errors,
   9186 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9187 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9188 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9189 		NQRXC_ERROR_RXE)) {
   9190 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9191 		    EXTRXC_ERROR_SE, 0))
   9192 			log(LOG_WARNING, "%s: symbol error\n",
   9193 			    device_xname(sc->sc_dev));
   9194 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9195 		    EXTRXC_ERROR_SEQ, 0))
   9196 			log(LOG_WARNING, "%s: receive sequence error\n",
   9197 			    device_xname(sc->sc_dev));
   9198 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9199 		    EXTRXC_ERROR_CE, 0))
   9200 			log(LOG_WARNING, "%s: CRC error\n",
   9201 			    device_xname(sc->sc_dev));
   9202 		return true;
   9203 	}
   9204 
   9205 	return false;
   9206 }
   9207 
   9208 static inline bool
   9209 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9210 {
   9211 	struct wm_softc *sc = rxq->rxq_sc;
   9212 
   9213 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9214 		NQRXC_STATUS_DD)) {
   9215 		/* We have processed all of the receive descriptors. */
   9216 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9217 		return false;
   9218 	}
   9219 
   9220 	return true;
   9221 }
   9222 
   9223 static inline bool
   9224 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9225     uint16_t vlantag, struct mbuf *m)
   9226 {
   9227 
   9228 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9229 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9230 		vlan_set_tag(m, le16toh(vlantag));
   9231 	}
   9232 
   9233 	return true;
   9234 }
   9235 
   9236 static inline void
   9237 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9238     uint32_t errors, struct mbuf *m)
   9239 {
   9240 	struct wm_softc *sc = rxq->rxq_sc;
   9241 
   9242 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9243 		if (wm_rxdesc_is_set_status(sc, status,
   9244 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9245 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9246 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9247 			if (wm_rxdesc_is_set_error(sc, errors,
   9248 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9249 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9250 		}
   9251 		if (wm_rxdesc_is_set_status(sc, status,
   9252 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9253 			/*
   9254 			 * Note: we don't know if this was TCP or UDP,
   9255 			 * so we just set both bits, and expect the
   9256 			 * upper layers to deal.
   9257 			 */
   9258 			WM_Q_EVCNT_INCR(rxq, tusum);
   9259 			m->m_pkthdr.csum_flags |=
   9260 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9261 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9262 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9263 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9264 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9265 		}
   9266 	}
   9267 }
   9268 
   9269 /*
   9270  * wm_rxeof:
   9271  *
   9272  *	Helper; handle receive interrupts.
   9273  */
   9274 static bool
   9275 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9276 {
   9277 	struct wm_softc *sc = rxq->rxq_sc;
   9278 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9279 	struct wm_rxsoft *rxs;
   9280 	struct mbuf *m;
   9281 	int i, len;
   9282 	int count = 0;
   9283 	uint32_t status, errors;
   9284 	uint16_t vlantag;
   9285 	bool more = false;
   9286 
   9287 	KASSERT(mutex_owned(rxq->rxq_lock));
   9288 
   9289 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9290 		if (limit-- == 0) {
   9291 			more = true;
   9292 			DPRINTF(sc, WM_DEBUG_RX,
   9293 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9294 				device_xname(sc->sc_dev), i));
   9295 			break;
   9296 		}
   9297 
   9298 		rxs = &rxq->rxq_soft[i];
   9299 
   9300 		DPRINTF(sc, WM_DEBUG_RX,
   9301 		    ("%s: RX: checking descriptor %d\n",
   9302 			device_xname(sc->sc_dev), i));
   9303 		wm_cdrxsync(rxq, i,
   9304 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9305 
   9306 		status = wm_rxdesc_get_status(rxq, i);
   9307 		errors = wm_rxdesc_get_errors(rxq, i);
   9308 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9309 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9310 #ifdef WM_DEBUG
   9311 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9312 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9313 #endif
   9314 
   9315 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9316 			break;
   9317 		}
   9318 
   9319 		count++;
   9320 		if (__predict_false(rxq->rxq_discard)) {
   9321 			DPRINTF(sc, WM_DEBUG_RX,
   9322 			    ("%s: RX: discarding contents of descriptor %d\n",
   9323 				device_xname(sc->sc_dev), i));
   9324 			wm_init_rxdesc(rxq, i);
   9325 			if (wm_rxdesc_is_eop(rxq, status)) {
   9326 				/* Reset our state. */
   9327 				DPRINTF(sc, WM_DEBUG_RX,
   9328 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9329 					device_xname(sc->sc_dev)));
   9330 				rxq->rxq_discard = 0;
   9331 			}
   9332 			continue;
   9333 		}
   9334 
   9335 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9336 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9337 
   9338 		m = rxs->rxs_mbuf;
   9339 
   9340 		/*
   9341 		 * Add a new receive buffer to the ring, unless of
   9342 		 * course the length is zero. Treat the latter as a
   9343 		 * failed mapping.
   9344 		 */
   9345 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9346 			/*
   9347 			 * Failed, throw away what we've done so
   9348 			 * far, and discard the rest of the packet.
   9349 			 */
   9350 			if_statinc(ifp, if_ierrors);
   9351 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9352 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9353 			wm_init_rxdesc(rxq, i);
   9354 			if (!wm_rxdesc_is_eop(rxq, status))
   9355 				rxq->rxq_discard = 1;
   9356 			if (rxq->rxq_head != NULL)
   9357 				m_freem(rxq->rxq_head);
   9358 			WM_RXCHAIN_RESET(rxq);
   9359 			DPRINTF(sc, WM_DEBUG_RX,
   9360 			    ("%s: RX: Rx buffer allocation failed, "
   9361 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9362 				rxq->rxq_discard ? " (discard)" : ""));
   9363 			continue;
   9364 		}
   9365 
   9366 		m->m_len = len;
   9367 		rxq->rxq_len += len;
   9368 		DPRINTF(sc, WM_DEBUG_RX,
   9369 		    ("%s: RX: buffer at %p len %d\n",
   9370 			device_xname(sc->sc_dev), m->m_data, len));
   9371 
   9372 		/* If this is not the end of the packet, keep looking. */
   9373 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9374 			WM_RXCHAIN_LINK(rxq, m);
   9375 			DPRINTF(sc, WM_DEBUG_RX,
   9376 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9377 				device_xname(sc->sc_dev), rxq->rxq_len));
   9378 			continue;
   9379 		}
   9380 
   9381 		/*
   9382 		 * Okay, we have the entire packet now. The chip is
   9383 		 * configured to include the FCS except I35[04], I21[01].
   9384 		 * (not all chips can be configured to strip it), so we need
   9385 		 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
   9386 		 * in RCTL register is always set, so we don't trim it.
   9387 		 * PCH2 and newer chip also not include FCS when jumbo
   9388 		 * frame is used to do workaround an errata.
   9389 		 * May need to adjust length of previous mbuf in the
   9390 		 * chain if the current mbuf is too short.
   9391 		 */
   9392 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9393 			if (m->m_len < ETHER_CRC_LEN) {
   9394 				rxq->rxq_tail->m_len
   9395 				    -= (ETHER_CRC_LEN - m->m_len);
   9396 				m->m_len = 0;
   9397 			} else
   9398 				m->m_len -= ETHER_CRC_LEN;
   9399 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9400 		} else
   9401 			len = rxq->rxq_len;
   9402 
   9403 		WM_RXCHAIN_LINK(rxq, m);
   9404 
   9405 		*rxq->rxq_tailp = NULL;
   9406 		m = rxq->rxq_head;
   9407 
   9408 		WM_RXCHAIN_RESET(rxq);
   9409 
   9410 		DPRINTF(sc, WM_DEBUG_RX,
   9411 		    ("%s: RX: have entire packet, len -> %d\n",
   9412 			device_xname(sc->sc_dev), len));
   9413 
   9414 		/* If an error occurred, update stats and drop the packet. */
   9415 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9416 			m_freem(m);
   9417 			continue;
   9418 		}
   9419 
   9420 		/* No errors.  Receive the packet. */
   9421 		m_set_rcvif(m, ifp);
   9422 		m->m_pkthdr.len = len;
   9423 		/*
   9424 		 * TODO
   9425 		 * should be save rsshash and rsstype to this mbuf.
   9426 		 */
   9427 		DPRINTF(sc, WM_DEBUG_RX,
   9428 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9429 			device_xname(sc->sc_dev), rsstype, rsshash));
   9430 
   9431 		/*
   9432 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9433 		 * for us.  Associate the tag with the packet.
   9434 		 */
   9435 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9436 			continue;
   9437 
   9438 		/* Set up checksum info for this packet. */
   9439 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9440 
   9441 		rxq->rxq_packets++;
   9442 		rxq->rxq_bytes += len;
   9443 		/* Pass it on. */
   9444 		if_percpuq_enqueue(sc->sc_ipq, m);
   9445 
   9446 		if (rxq->rxq_stopping)
   9447 			break;
   9448 	}
   9449 	rxq->rxq_ptr = i;
   9450 
   9451 	if (count != 0)
   9452 		rnd_add_uint32(&sc->rnd_source, count);
   9453 
   9454 	DPRINTF(sc, WM_DEBUG_RX,
   9455 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9456 
   9457 	return more;
   9458 }
   9459 
   9460 /*
   9461  * wm_linkintr_gmii:
   9462  *
   9463  *	Helper; handle link interrupts for GMII.
   9464  */
   9465 static void
   9466 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9467 {
   9468 	device_t dev = sc->sc_dev;
   9469 	uint32_t status, reg;
   9470 	bool link;
   9471 	int rv;
   9472 
   9473 	KASSERT(WM_CORE_LOCKED(sc));
   9474 
   9475 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9476 		__func__));
   9477 
   9478 	if ((icr & ICR_LSC) == 0) {
   9479 		if (icr & ICR_RXSEQ)
   9480 			DPRINTF(sc, WM_DEBUG_LINK,
   9481 			    ("%s: LINK Receive sequence error\n",
   9482 				device_xname(dev)));
   9483 		return;
   9484 	}
   9485 
   9486 	/* Link status changed */
   9487 	status = CSR_READ(sc, WMREG_STATUS);
   9488 	link = status & STATUS_LU;
   9489 	if (link) {
   9490 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9491 			device_xname(dev),
   9492 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9493 		if (wm_phy_need_linkdown_discard(sc))
   9494 			wm_clear_linkdown_discard(sc);
   9495 	} else {
   9496 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9497 			device_xname(dev)));
   9498 		if (wm_phy_need_linkdown_discard(sc))
   9499 			wm_set_linkdown_discard(sc);
   9500 	}
   9501 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9502 		wm_gig_downshift_workaround_ich8lan(sc);
   9503 
   9504 	if ((sc->sc_type == WM_T_ICH8)
   9505 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9506 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9507 	}
   9508 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9509 		device_xname(dev)));
   9510 	mii_pollstat(&sc->sc_mii);
   9511 	if (sc->sc_type == WM_T_82543) {
   9512 		int miistatus, active;
   9513 
   9514 		/*
   9515 		 * With 82543, we need to force speed and
   9516 		 * duplex on the MAC equal to what the PHY
   9517 		 * speed and duplex configuration is.
   9518 		 */
   9519 		miistatus = sc->sc_mii.mii_media_status;
   9520 
   9521 		if (miistatus & IFM_ACTIVE) {
   9522 			active = sc->sc_mii.mii_media_active;
   9523 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9524 			switch (IFM_SUBTYPE(active)) {
   9525 			case IFM_10_T:
   9526 				sc->sc_ctrl |= CTRL_SPEED_10;
   9527 				break;
   9528 			case IFM_100_TX:
   9529 				sc->sc_ctrl |= CTRL_SPEED_100;
   9530 				break;
   9531 			case IFM_1000_T:
   9532 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9533 				break;
   9534 			default:
   9535 				/*
   9536 				 * Fiber?
   9537 				 * Shoud not enter here.
   9538 				 */
   9539 				device_printf(dev, "unknown media (%x)\n",
   9540 				    active);
   9541 				break;
   9542 			}
   9543 			if (active & IFM_FDX)
   9544 				sc->sc_ctrl |= CTRL_FD;
   9545 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9546 		}
   9547 	} else if (sc->sc_type == WM_T_PCH) {
   9548 		wm_k1_gig_workaround_hv(sc,
   9549 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9550 	}
   9551 
   9552 	/*
   9553 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9554 	 * aggressive resulting in many collisions. To avoid this, increase
   9555 	 * the IPG and reduce Rx latency in the PHY.
   9556 	 */
   9557 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9558 	    && link) {
   9559 		uint32_t tipg_reg;
   9560 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9561 		bool fdx;
   9562 		uint16_t emi_addr, emi_val;
   9563 
   9564 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9565 		tipg_reg &= ~TIPG_IPGT_MASK;
   9566 		fdx = status & STATUS_FD;
   9567 
   9568 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9569 			tipg_reg |= 0xff;
   9570 			/* Reduce Rx latency in analog PHY */
   9571 			emi_val = 0;
   9572 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9573 		    fdx && speed != STATUS_SPEED_1000) {
   9574 			tipg_reg |= 0xc;
   9575 			emi_val = 1;
   9576 		} else {
   9577 			/* Roll back the default values */
   9578 			tipg_reg |= 0x08;
   9579 			emi_val = 1;
   9580 		}
   9581 
   9582 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9583 
   9584 		rv = sc->phy.acquire(sc);
   9585 		if (rv)
   9586 			return;
   9587 
   9588 		if (sc->sc_type == WM_T_PCH2)
   9589 			emi_addr = I82579_RX_CONFIG;
   9590 		else
   9591 			emi_addr = I217_RX_CONFIG;
   9592 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9593 
   9594 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9595 			uint16_t phy_reg;
   9596 
   9597 			sc->phy.readreg_locked(dev, 2,
   9598 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9599 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9600 			if (speed == STATUS_SPEED_100
   9601 			    || speed == STATUS_SPEED_10)
   9602 				phy_reg |= 0x3e8;
   9603 			else
   9604 				phy_reg |= 0xfa;
   9605 			sc->phy.writereg_locked(dev, 2,
   9606 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9607 
   9608 			if (speed == STATUS_SPEED_1000) {
   9609 				sc->phy.readreg_locked(dev, 2,
   9610 				    HV_PM_CTRL, &phy_reg);
   9611 
   9612 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9613 
   9614 				sc->phy.writereg_locked(dev, 2,
   9615 				    HV_PM_CTRL, phy_reg);
   9616 			}
   9617 		}
   9618 		sc->phy.release(sc);
   9619 
   9620 		if (rv)
   9621 			return;
   9622 
   9623 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9624 			uint16_t data, ptr_gap;
   9625 
   9626 			if (speed == STATUS_SPEED_1000) {
   9627 				rv = sc->phy.acquire(sc);
   9628 				if (rv)
   9629 					return;
   9630 
   9631 				rv = sc->phy.readreg_locked(dev, 2,
   9632 				    I82579_UNKNOWN1, &data);
   9633 				if (rv) {
   9634 					sc->phy.release(sc);
   9635 					return;
   9636 				}
   9637 
   9638 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9639 				if (ptr_gap < 0x18) {
   9640 					data &= ~(0x3ff << 2);
   9641 					data |= (0x18 << 2);
   9642 					rv = sc->phy.writereg_locked(dev,
   9643 					    2, I82579_UNKNOWN1, data);
   9644 				}
   9645 				sc->phy.release(sc);
   9646 				if (rv)
   9647 					return;
   9648 			} else {
   9649 				rv = sc->phy.acquire(sc);
   9650 				if (rv)
   9651 					return;
   9652 
   9653 				rv = sc->phy.writereg_locked(dev, 2,
   9654 				    I82579_UNKNOWN1, 0xc023);
   9655 				sc->phy.release(sc);
   9656 				if (rv)
   9657 					return;
   9658 
   9659 			}
   9660 		}
   9661 	}
   9662 
   9663 	/*
   9664 	 * I217 Packet Loss issue:
   9665 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9666 	 * on power up.
   9667 	 * Set the Beacon Duration for I217 to 8 usec
   9668 	 */
   9669 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9670 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9671 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9672 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9673 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9674 	}
   9675 
   9676 	/* Work-around I218 hang issue */
   9677 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9678 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9679 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9680 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9681 		wm_k1_workaround_lpt_lp(sc, link);
   9682 
   9683 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9684 		/*
   9685 		 * Set platform power management values for Latency
   9686 		 * Tolerance Reporting (LTR)
   9687 		 */
   9688 		wm_platform_pm_pch_lpt(sc,
   9689 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9690 	}
   9691 
   9692 	/* Clear link partner's EEE ability */
   9693 	sc->eee_lp_ability = 0;
   9694 
   9695 	/* FEXTNVM6 K1-off workaround */
   9696 	if (sc->sc_type == WM_T_PCH_SPT) {
   9697 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9698 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9699 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9700 		else
   9701 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9702 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9703 	}
   9704 
   9705 	if (!link)
   9706 		return;
   9707 
   9708 	switch (sc->sc_type) {
   9709 	case WM_T_PCH2:
   9710 		wm_k1_workaround_lv(sc);
   9711 		/* FALLTHROUGH */
   9712 	case WM_T_PCH:
   9713 		if (sc->sc_phytype == WMPHY_82578)
   9714 			wm_link_stall_workaround_hv(sc);
   9715 		break;
   9716 	default:
   9717 		break;
   9718 	}
   9719 
   9720 	/* Enable/Disable EEE after link up */
   9721 	if (sc->sc_phytype > WMPHY_82579)
   9722 		wm_set_eee_pchlan(sc);
   9723 }
   9724 
   9725 /*
   9726  * wm_linkintr_tbi:
   9727  *
   9728  *	Helper; handle link interrupts for TBI mode.
   9729  */
   9730 static void
   9731 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9732 {
   9733 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9734 	uint32_t status;
   9735 
   9736 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9737 		__func__));
   9738 
   9739 	status = CSR_READ(sc, WMREG_STATUS);
   9740 	if (icr & ICR_LSC) {
   9741 		wm_check_for_link(sc);
   9742 		if (status & STATUS_LU) {
   9743 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9744 				device_xname(sc->sc_dev),
   9745 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9746 			/*
   9747 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9748 			 * so we should update sc->sc_ctrl
   9749 			 */
   9750 
   9751 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9752 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9753 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9754 			if (status & STATUS_FD)
   9755 				sc->sc_tctl |=
   9756 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9757 			else
   9758 				sc->sc_tctl |=
   9759 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9760 			if (sc->sc_ctrl & CTRL_TFCE)
   9761 				sc->sc_fcrtl |= FCRTL_XONE;
   9762 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9763 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9764 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9765 			sc->sc_tbi_linkup = 1;
   9766 			if_link_state_change(ifp, LINK_STATE_UP);
   9767 		} else {
   9768 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9769 				device_xname(sc->sc_dev)));
   9770 			sc->sc_tbi_linkup = 0;
   9771 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9772 		}
   9773 		/* Update LED */
   9774 		wm_tbi_serdes_set_linkled(sc);
   9775 	} else if (icr & ICR_RXSEQ)
   9776 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9777 			device_xname(sc->sc_dev)));
   9778 }
   9779 
   9780 /*
   9781  * wm_linkintr_serdes:
   9782  *
   9783  *	Helper; handle link interrupts for TBI mode.
   9784  */
   9785 static void
   9786 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9787 {
   9788 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9789 	struct mii_data *mii = &sc->sc_mii;
   9790 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9791 	uint32_t pcs_adv, pcs_lpab, reg;
   9792 
   9793 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9794 		__func__));
   9795 
   9796 	if (icr & ICR_LSC) {
   9797 		/* Check PCS */
   9798 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9799 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9800 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9801 				device_xname(sc->sc_dev)));
   9802 			mii->mii_media_status |= IFM_ACTIVE;
   9803 			sc->sc_tbi_linkup = 1;
   9804 			if_link_state_change(ifp, LINK_STATE_UP);
   9805 		} else {
   9806 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9807 				device_xname(sc->sc_dev)));
   9808 			mii->mii_media_status |= IFM_NONE;
   9809 			sc->sc_tbi_linkup = 0;
   9810 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9811 			wm_tbi_serdes_set_linkled(sc);
   9812 			return;
   9813 		}
   9814 		mii->mii_media_active |= IFM_1000_SX;
   9815 		if ((reg & PCS_LSTS_FDX) != 0)
   9816 			mii->mii_media_active |= IFM_FDX;
   9817 		else
   9818 			mii->mii_media_active |= IFM_HDX;
   9819 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9820 			/* Check flow */
   9821 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9822 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9823 				DPRINTF(sc, WM_DEBUG_LINK,
   9824 				    ("XXX LINKOK but not ACOMP\n"));
   9825 				return;
   9826 			}
   9827 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9828 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9829 			DPRINTF(sc, WM_DEBUG_LINK,
   9830 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9831 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9832 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9833 				mii->mii_media_active |= IFM_FLOW
   9834 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9835 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9836 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9837 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9838 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9839 				mii->mii_media_active |= IFM_FLOW
   9840 				    | IFM_ETH_TXPAUSE;
   9841 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9842 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9843 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9844 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9845 				mii->mii_media_active |= IFM_FLOW
   9846 				    | IFM_ETH_RXPAUSE;
   9847 		}
   9848 		/* Update LED */
   9849 		wm_tbi_serdes_set_linkled(sc);
   9850 	} else
   9851 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9852 		    device_xname(sc->sc_dev)));
   9853 }
   9854 
   9855 /*
   9856  * wm_linkintr:
   9857  *
   9858  *	Helper; handle link interrupts.
   9859  */
   9860 static void
   9861 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9862 {
   9863 
   9864 	KASSERT(WM_CORE_LOCKED(sc));
   9865 
   9866 	if (sc->sc_flags & WM_F_HAS_MII)
   9867 		wm_linkintr_gmii(sc, icr);
   9868 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9869 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9870 		wm_linkintr_serdes(sc, icr);
   9871 	else
   9872 		wm_linkintr_tbi(sc, icr);
   9873 }
   9874 
   9875 
   9876 static inline void
   9877 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9878 {
   9879 
   9880 	if (wmq->wmq_txrx_use_workqueue)
   9881 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9882 	else
   9883 		softint_schedule(wmq->wmq_si);
   9884 }
   9885 
   9886 static inline void
   9887 wm_legacy_intr_disable(struct wm_softc *sc)
   9888 {
   9889 
   9890 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9891 }
   9892 
   9893 static inline void
   9894 wm_legacy_intr_enable(struct wm_softc *sc)
   9895 {
   9896 
   9897 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9898 }
   9899 
   9900 /*
   9901  * wm_intr_legacy:
   9902  *
   9903  *	Interrupt service routine for INTx and MSI.
   9904  */
   9905 static int
   9906 wm_intr_legacy(void *arg)
   9907 {
   9908 	struct wm_softc *sc = arg;
   9909 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9910 	struct wm_queue *wmq = &sc->sc_queue[0];
   9911 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9912 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9913 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9914 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9915 	uint32_t icr, rndval = 0;
   9916 	bool more = false;
   9917 
   9918 	icr = CSR_READ(sc, WMREG_ICR);
   9919 	if ((icr & sc->sc_icr) == 0)
   9920 		return 0;
   9921 
   9922 	DPRINTF(sc, WM_DEBUG_TX,
   9923 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9924 	if (rndval == 0)
   9925 		rndval = icr;
   9926 
   9927 	mutex_enter(rxq->rxq_lock);
   9928 
   9929 	if (rxq->rxq_stopping) {
   9930 		mutex_exit(rxq->rxq_lock);
   9931 		return 1;
   9932 	}
   9933 
   9934 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9935 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9936 		DPRINTF(sc, WM_DEBUG_RX,
   9937 		    ("%s: RX: got Rx intr 0x%08x\n",
   9938 			device_xname(sc->sc_dev),
   9939 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   9940 		WM_Q_EVCNT_INCR(rxq, intr);
   9941 	}
   9942 #endif
   9943 	/*
   9944 	 * wm_rxeof() does *not* call upper layer functions directly,
   9945 	 * as if_percpuq_enqueue() just call softint_schedule().
   9946 	 * So, we can call wm_rxeof() in interrupt context.
   9947 	 */
   9948 	more = wm_rxeof(rxq, rxlimit);
   9949 
   9950 	mutex_exit(rxq->rxq_lock);
   9951 	mutex_enter(txq->txq_lock);
   9952 
   9953 	if (txq->txq_stopping) {
   9954 		mutex_exit(txq->txq_lock);
   9955 		return 1;
   9956 	}
   9957 
   9958 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9959 	if (icr & ICR_TXDW) {
   9960 		DPRINTF(sc, WM_DEBUG_TX,
   9961 		    ("%s: TX: got TXDW interrupt\n",
   9962 			device_xname(sc->sc_dev)));
   9963 		WM_Q_EVCNT_INCR(txq, txdw);
   9964 	}
   9965 #endif
   9966 	more |= wm_txeof(txq, txlimit);
   9967 	if (!IF_IS_EMPTY(&ifp->if_snd))
   9968 		more = true;
   9969 
   9970 	mutex_exit(txq->txq_lock);
   9971 	WM_CORE_LOCK(sc);
   9972 
   9973 	if (sc->sc_core_stopping) {
   9974 		WM_CORE_UNLOCK(sc);
   9975 		return 1;
   9976 	}
   9977 
   9978 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9979 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9980 		wm_linkintr(sc, icr);
   9981 	}
   9982 	if ((icr & ICR_GPI(0)) != 0)
   9983 		device_printf(sc->sc_dev, "got module interrupt\n");
   9984 
   9985 	WM_CORE_UNLOCK(sc);
   9986 
   9987 	if (icr & ICR_RXO) {
   9988 #if defined(WM_DEBUG)
   9989 		log(LOG_WARNING, "%s: Receive overrun\n",
   9990 		    device_xname(sc->sc_dev));
   9991 #endif /* defined(WM_DEBUG) */
   9992 	}
   9993 
   9994 	rnd_add_uint32(&sc->rnd_source, rndval);
   9995 
   9996 	if (more) {
   9997 		/* Try to get more packets going. */
   9998 		wm_legacy_intr_disable(sc);
   9999 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10000 		wm_sched_handle_queue(sc, wmq);
   10001 	}
   10002 
   10003 	return 1;
   10004 }
   10005 
   10006 static inline void
   10007 wm_txrxintr_disable(struct wm_queue *wmq)
   10008 {
   10009 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10010 
   10011 	if (__predict_false(!wm_is_using_msix(sc))) {
   10012 		return wm_legacy_intr_disable(sc);
   10013 	}
   10014 
   10015 	if (sc->sc_type == WM_T_82574)
   10016 		CSR_WRITE(sc, WMREG_IMC,
   10017 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10018 	else if (sc->sc_type == WM_T_82575)
   10019 		CSR_WRITE(sc, WMREG_EIMC,
   10020 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10021 	else
   10022 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10023 }
   10024 
   10025 static inline void
   10026 wm_txrxintr_enable(struct wm_queue *wmq)
   10027 {
   10028 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10029 
   10030 	wm_itrs_calculate(sc, wmq);
   10031 
   10032 	if (__predict_false(!wm_is_using_msix(sc))) {
   10033 		return wm_legacy_intr_enable(sc);
   10034 	}
   10035 
   10036 	/*
   10037 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
   10038 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
   10039 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
   10040 	 * while each wm_handle_queue(wmq) is runnig.
   10041 	 */
   10042 	if (sc->sc_type == WM_T_82574)
   10043 		CSR_WRITE(sc, WMREG_IMS,
   10044 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10045 	else if (sc->sc_type == WM_T_82575)
   10046 		CSR_WRITE(sc, WMREG_EIMS,
   10047 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10048 	else
   10049 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10050 }
   10051 
   10052 static int
   10053 wm_txrxintr_msix(void *arg)
   10054 {
   10055 	struct wm_queue *wmq = arg;
   10056 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10057 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10058 	struct wm_softc *sc = txq->txq_sc;
   10059 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10060 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10061 	bool txmore;
   10062 	bool rxmore;
   10063 
   10064 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10065 
   10066 	DPRINTF(sc, WM_DEBUG_TX,
   10067 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10068 
   10069 	wm_txrxintr_disable(wmq);
   10070 
   10071 	mutex_enter(txq->txq_lock);
   10072 
   10073 	if (txq->txq_stopping) {
   10074 		mutex_exit(txq->txq_lock);
   10075 		return 1;
   10076 	}
   10077 
   10078 	WM_Q_EVCNT_INCR(txq, txdw);
   10079 	txmore = wm_txeof(txq, txlimit);
   10080 	/* wm_deferred start() is done in wm_handle_queue(). */
   10081 	mutex_exit(txq->txq_lock);
   10082 
   10083 	DPRINTF(sc, WM_DEBUG_RX,
   10084 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10085 	mutex_enter(rxq->rxq_lock);
   10086 
   10087 	if (rxq->rxq_stopping) {
   10088 		mutex_exit(rxq->rxq_lock);
   10089 		return 1;
   10090 	}
   10091 
   10092 	WM_Q_EVCNT_INCR(rxq, intr);
   10093 	rxmore = wm_rxeof(rxq, rxlimit);
   10094 	mutex_exit(rxq->rxq_lock);
   10095 
   10096 	wm_itrs_writereg(sc, wmq);
   10097 
   10098 	if (txmore || rxmore) {
   10099 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10100 		wm_sched_handle_queue(sc, wmq);
   10101 	} else
   10102 		wm_txrxintr_enable(wmq);
   10103 
   10104 	return 1;
   10105 }
   10106 
   10107 static void
   10108 wm_handle_queue(void *arg)
   10109 {
   10110 	struct wm_queue *wmq = arg;
   10111 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10112 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10113 	struct wm_softc *sc = txq->txq_sc;
   10114 	u_int txlimit = sc->sc_tx_process_limit;
   10115 	u_int rxlimit = sc->sc_rx_process_limit;
   10116 	bool txmore;
   10117 	bool rxmore;
   10118 
   10119 	mutex_enter(txq->txq_lock);
   10120 	if (txq->txq_stopping) {
   10121 		mutex_exit(txq->txq_lock);
   10122 		return;
   10123 	}
   10124 	txmore = wm_txeof(txq, txlimit);
   10125 	wm_deferred_start_locked(txq);
   10126 	mutex_exit(txq->txq_lock);
   10127 
   10128 	mutex_enter(rxq->rxq_lock);
   10129 	if (rxq->rxq_stopping) {
   10130 		mutex_exit(rxq->rxq_lock);
   10131 		return;
   10132 	}
   10133 	WM_Q_EVCNT_INCR(rxq, defer);
   10134 	rxmore = wm_rxeof(rxq, rxlimit);
   10135 	mutex_exit(rxq->rxq_lock);
   10136 
   10137 	if (txmore || rxmore) {
   10138 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10139 		wm_sched_handle_queue(sc, wmq);
   10140 	} else
   10141 		wm_txrxintr_enable(wmq);
   10142 }
   10143 
   10144 static void
   10145 wm_handle_queue_work(struct work *wk, void *context)
   10146 {
   10147 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10148 
   10149 	/*
   10150 	 * "enqueued flag" is not required here.
   10151 	 */
   10152 	wm_handle_queue(wmq);
   10153 }
   10154 
   10155 /*
   10156  * wm_linkintr_msix:
   10157  *
   10158  *	Interrupt service routine for link status change for MSI-X.
   10159  */
   10160 static int
   10161 wm_linkintr_msix(void *arg)
   10162 {
   10163 	struct wm_softc *sc = arg;
   10164 	uint32_t reg;
   10165 	bool has_rxo;
   10166 
   10167 	reg = CSR_READ(sc, WMREG_ICR);
   10168 	WM_CORE_LOCK(sc);
   10169 	DPRINTF(sc, WM_DEBUG_LINK,
   10170 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10171 		device_xname(sc->sc_dev), reg));
   10172 
   10173 	if (sc->sc_core_stopping)
   10174 		goto out;
   10175 
   10176 	if ((reg & ICR_LSC) != 0) {
   10177 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10178 		wm_linkintr(sc, ICR_LSC);
   10179 	}
   10180 	if ((reg & ICR_GPI(0)) != 0)
   10181 		device_printf(sc->sc_dev, "got module interrupt\n");
   10182 
   10183 	/*
   10184 	 * XXX 82574 MSI-X mode workaround
   10185 	 *
   10186 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   10187 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   10188 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   10189 	 * interrupts by writing WMREG_ICS to process receive packets.
   10190 	 */
   10191 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10192 #if defined(WM_DEBUG)
   10193 		log(LOG_WARNING, "%s: Receive overrun\n",
   10194 		    device_xname(sc->sc_dev));
   10195 #endif /* defined(WM_DEBUG) */
   10196 
   10197 		has_rxo = true;
   10198 		/*
   10199 		 * The RXO interrupt is very high rate when receive traffic is
   10200 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   10201 		 * interrupts. ICR_OTHER will be enabled at the end of
   10202 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10203 		 * ICR_RXQ(1) interrupts.
   10204 		 */
   10205 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10206 
   10207 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10208 	}
   10209 
   10210 
   10211 
   10212 out:
   10213 	WM_CORE_UNLOCK(sc);
   10214 
   10215 	if (sc->sc_type == WM_T_82574) {
   10216 		if (!has_rxo)
   10217 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10218 		else
   10219 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10220 	} else if (sc->sc_type == WM_T_82575)
   10221 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10222 	else
   10223 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10224 
   10225 	return 1;
   10226 }
   10227 
   10228 /*
   10229  * Media related.
   10230  * GMII, SGMII, TBI (and SERDES)
   10231  */
   10232 
   10233 /* Common */
   10234 
   10235 /*
   10236  * wm_tbi_serdes_set_linkled:
   10237  *
   10238  *	Update the link LED on TBI and SERDES devices.
   10239  */
   10240 static void
   10241 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10242 {
   10243 
   10244 	if (sc->sc_tbi_linkup)
   10245 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10246 	else
   10247 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10248 
   10249 	/* 82540 or newer devices are active low */
   10250 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10251 
   10252 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10253 }
   10254 
   10255 /* GMII related */
   10256 
   10257 /*
   10258  * wm_gmii_reset:
   10259  *
   10260  *	Reset the PHY.
   10261  */
   10262 static void
   10263 wm_gmii_reset(struct wm_softc *sc)
   10264 {
   10265 	uint32_t reg;
   10266 	int rv;
   10267 
   10268 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10269 		device_xname(sc->sc_dev), __func__));
   10270 
   10271 	rv = sc->phy.acquire(sc);
   10272 	if (rv != 0) {
   10273 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10274 		    __func__);
   10275 		return;
   10276 	}
   10277 
   10278 	switch (sc->sc_type) {
   10279 	case WM_T_82542_2_0:
   10280 	case WM_T_82542_2_1:
   10281 		/* null */
   10282 		break;
   10283 	case WM_T_82543:
   10284 		/*
   10285 		 * With 82543, we need to force speed and duplex on the MAC
   10286 		 * equal to what the PHY speed and duplex configuration is.
   10287 		 * In addition, we need to perform a hardware reset on the PHY
   10288 		 * to take it out of reset.
   10289 		 */
   10290 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10291 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10292 
   10293 		/* The PHY reset pin is active-low. */
   10294 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10295 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10296 		    CTRL_EXT_SWDPIN(4));
   10297 		reg |= CTRL_EXT_SWDPIO(4);
   10298 
   10299 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10300 		CSR_WRITE_FLUSH(sc);
   10301 		delay(10*1000);
   10302 
   10303 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10304 		CSR_WRITE_FLUSH(sc);
   10305 		delay(150);
   10306 #if 0
   10307 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10308 #endif
   10309 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10310 		break;
   10311 	case WM_T_82544:	/* Reset 10000us */
   10312 	case WM_T_82540:
   10313 	case WM_T_82545:
   10314 	case WM_T_82545_3:
   10315 	case WM_T_82546:
   10316 	case WM_T_82546_3:
   10317 	case WM_T_82541:
   10318 	case WM_T_82541_2:
   10319 	case WM_T_82547:
   10320 	case WM_T_82547_2:
   10321 	case WM_T_82571:	/* Reset 100us */
   10322 	case WM_T_82572:
   10323 	case WM_T_82573:
   10324 	case WM_T_82574:
   10325 	case WM_T_82575:
   10326 	case WM_T_82576:
   10327 	case WM_T_82580:
   10328 	case WM_T_I350:
   10329 	case WM_T_I354:
   10330 	case WM_T_I210:
   10331 	case WM_T_I211:
   10332 	case WM_T_82583:
   10333 	case WM_T_80003:
   10334 		/* Generic reset */
   10335 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10336 		CSR_WRITE_FLUSH(sc);
   10337 		delay(20000);
   10338 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10339 		CSR_WRITE_FLUSH(sc);
   10340 		delay(20000);
   10341 
   10342 		if ((sc->sc_type == WM_T_82541)
   10343 		    || (sc->sc_type == WM_T_82541_2)
   10344 		    || (sc->sc_type == WM_T_82547)
   10345 		    || (sc->sc_type == WM_T_82547_2)) {
   10346 			/* Workaround for igp are done in igp_reset() */
   10347 			/* XXX add code to set LED after phy reset */
   10348 		}
   10349 		break;
   10350 	case WM_T_ICH8:
   10351 	case WM_T_ICH9:
   10352 	case WM_T_ICH10:
   10353 	case WM_T_PCH:
   10354 	case WM_T_PCH2:
   10355 	case WM_T_PCH_LPT:
   10356 	case WM_T_PCH_SPT:
   10357 	case WM_T_PCH_CNP:
   10358 		/* Generic reset */
   10359 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10360 		CSR_WRITE_FLUSH(sc);
   10361 		delay(100);
   10362 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10363 		CSR_WRITE_FLUSH(sc);
   10364 		delay(150);
   10365 		break;
   10366 	default:
   10367 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10368 		    __func__);
   10369 		break;
   10370 	}
   10371 
   10372 	sc->phy.release(sc);
   10373 
   10374 	/* get_cfg_done */
   10375 	wm_get_cfg_done(sc);
   10376 
   10377 	/* Extra setup */
   10378 	switch (sc->sc_type) {
   10379 	case WM_T_82542_2_0:
   10380 	case WM_T_82542_2_1:
   10381 	case WM_T_82543:
   10382 	case WM_T_82544:
   10383 	case WM_T_82540:
   10384 	case WM_T_82545:
   10385 	case WM_T_82545_3:
   10386 	case WM_T_82546:
   10387 	case WM_T_82546_3:
   10388 	case WM_T_82541_2:
   10389 	case WM_T_82547_2:
   10390 	case WM_T_82571:
   10391 	case WM_T_82572:
   10392 	case WM_T_82573:
   10393 	case WM_T_82574:
   10394 	case WM_T_82583:
   10395 	case WM_T_82575:
   10396 	case WM_T_82576:
   10397 	case WM_T_82580:
   10398 	case WM_T_I350:
   10399 	case WM_T_I354:
   10400 	case WM_T_I210:
   10401 	case WM_T_I211:
   10402 	case WM_T_80003:
   10403 		/* Null */
   10404 		break;
   10405 	case WM_T_82541:
   10406 	case WM_T_82547:
   10407 		/* XXX Configure actively LED after PHY reset */
   10408 		break;
   10409 	case WM_T_ICH8:
   10410 	case WM_T_ICH9:
   10411 	case WM_T_ICH10:
   10412 	case WM_T_PCH:
   10413 	case WM_T_PCH2:
   10414 	case WM_T_PCH_LPT:
   10415 	case WM_T_PCH_SPT:
   10416 	case WM_T_PCH_CNP:
   10417 		wm_phy_post_reset(sc);
   10418 		break;
   10419 	default:
   10420 		panic("%s: unknown type\n", __func__);
   10421 		break;
   10422 	}
   10423 }
   10424 
   10425 /*
   10426  * Setup sc_phytype and mii_{read|write}reg.
   10427  *
   10428  *  To identify PHY type, correct read/write function should be selected.
   10429  * To select correct read/write function, PCI ID or MAC type are required
   10430  * without accessing PHY registers.
   10431  *
   10432  *  On the first call of this function, PHY ID is not known yet. Check
   10433  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   10434  * result might be incorrect.
   10435  *
   10436  *  In the second call, PHY OUI and model is used to identify PHY type.
   10437  * It might not be perfect because of the lack of compared entry, but it
   10438  * would be better than the first call.
   10439  *
   10440  *  If the detected new result and previous assumption is different,
   10441  * diagnous message will be printed.
   10442  */
   10443 static void
   10444 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10445     uint16_t phy_model)
   10446 {
   10447 	device_t dev = sc->sc_dev;
   10448 	struct mii_data *mii = &sc->sc_mii;
   10449 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10450 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10451 	mii_readreg_t new_readreg;
   10452 	mii_writereg_t new_writereg;
   10453 	bool dodiag = true;
   10454 
   10455 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10456 		device_xname(sc->sc_dev), __func__));
   10457 
   10458 	/*
   10459 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
   10460 	 * incorrect. So don't print diag output when it's 2nd call.
   10461 	 */
   10462 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10463 		dodiag = false;
   10464 
   10465 	if (mii->mii_readreg == NULL) {
   10466 		/*
   10467 		 *  This is the first call of this function. For ICH and PCH
   10468 		 * variants, it's difficult to determine the PHY access method
   10469 		 * by sc_type, so use the PCI product ID for some devices.
   10470 		 */
   10471 
   10472 		switch (sc->sc_pcidevid) {
   10473 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10474 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10475 			/* 82577 */
   10476 			new_phytype = WMPHY_82577;
   10477 			break;
   10478 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10479 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10480 			/* 82578 */
   10481 			new_phytype = WMPHY_82578;
   10482 			break;
   10483 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10484 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10485 			/* 82579 */
   10486 			new_phytype = WMPHY_82579;
   10487 			break;
   10488 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10489 		case PCI_PRODUCT_INTEL_82801I_BM:
   10490 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10491 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10492 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10493 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10494 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10495 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10496 			/* ICH8, 9, 10 with 82567 */
   10497 			new_phytype = WMPHY_BM;
   10498 			break;
   10499 		default:
   10500 			break;
   10501 		}
   10502 	} else {
   10503 		/* It's not the first call. Use PHY OUI and model */
   10504 		switch (phy_oui) {
   10505 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10506 			switch (phy_model) {
   10507 			case 0x0004: /* XXX */
   10508 				new_phytype = WMPHY_82578;
   10509 				break;
   10510 			default:
   10511 				break;
   10512 			}
   10513 			break;
   10514 		case MII_OUI_xxMARVELL:
   10515 			switch (phy_model) {
   10516 			case MII_MODEL_xxMARVELL_I210:
   10517 				new_phytype = WMPHY_I210;
   10518 				break;
   10519 			case MII_MODEL_xxMARVELL_E1011:
   10520 			case MII_MODEL_xxMARVELL_E1000_3:
   10521 			case MII_MODEL_xxMARVELL_E1000_5:
   10522 			case MII_MODEL_xxMARVELL_E1112:
   10523 				new_phytype = WMPHY_M88;
   10524 				break;
   10525 			case MII_MODEL_xxMARVELL_E1149:
   10526 				new_phytype = WMPHY_BM;
   10527 				break;
   10528 			case MII_MODEL_xxMARVELL_E1111:
   10529 			case MII_MODEL_xxMARVELL_I347:
   10530 			case MII_MODEL_xxMARVELL_E1512:
   10531 			case MII_MODEL_xxMARVELL_E1340M:
   10532 			case MII_MODEL_xxMARVELL_E1543:
   10533 				new_phytype = WMPHY_M88;
   10534 				break;
   10535 			case MII_MODEL_xxMARVELL_I82563:
   10536 				new_phytype = WMPHY_GG82563;
   10537 				break;
   10538 			default:
   10539 				break;
   10540 			}
   10541 			break;
   10542 		case MII_OUI_INTEL:
   10543 			switch (phy_model) {
   10544 			case MII_MODEL_INTEL_I82577:
   10545 				new_phytype = WMPHY_82577;
   10546 				break;
   10547 			case MII_MODEL_INTEL_I82579:
   10548 				new_phytype = WMPHY_82579;
   10549 				break;
   10550 			case MII_MODEL_INTEL_I217:
   10551 				new_phytype = WMPHY_I217;
   10552 				break;
   10553 			case MII_MODEL_INTEL_I82580:
   10554 				new_phytype = WMPHY_82580;
   10555 				break;
   10556 			case MII_MODEL_INTEL_I350:
   10557 				new_phytype = WMPHY_I350;
   10558 				break;
   10559 				break;
   10560 			default:
   10561 				break;
   10562 			}
   10563 			break;
   10564 		case MII_OUI_yyINTEL:
   10565 			switch (phy_model) {
   10566 			case MII_MODEL_yyINTEL_I82562G:
   10567 			case MII_MODEL_yyINTEL_I82562EM:
   10568 			case MII_MODEL_yyINTEL_I82562ET:
   10569 				new_phytype = WMPHY_IFE;
   10570 				break;
   10571 			case MII_MODEL_yyINTEL_IGP01E1000:
   10572 				new_phytype = WMPHY_IGP;
   10573 				break;
   10574 			case MII_MODEL_yyINTEL_I82566:
   10575 				new_phytype = WMPHY_IGP_3;
   10576 				break;
   10577 			default:
   10578 				break;
   10579 			}
   10580 			break;
   10581 		default:
   10582 			break;
   10583 		}
   10584 
   10585 		if (dodiag) {
   10586 			if (new_phytype == WMPHY_UNKNOWN)
   10587 				aprint_verbose_dev(dev,
   10588 				    "%s: Unknown PHY model. OUI=%06x, "
   10589 				    "model=%04x\n", __func__, phy_oui,
   10590 				    phy_model);
   10591 
   10592 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10593 			    && (sc->sc_phytype != new_phytype)) {
   10594 				aprint_error_dev(dev, "Previously assumed PHY "
   10595 				    "type(%u) was incorrect. PHY type from PHY"
   10596 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10597 			}
   10598 		}
   10599 	}
   10600 
   10601 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10602 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10603 		/* SGMII */
   10604 		new_readreg = wm_sgmii_readreg;
   10605 		new_writereg = wm_sgmii_writereg;
   10606 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10607 		/* BM2 (phyaddr == 1) */
   10608 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10609 		    && (new_phytype != WMPHY_BM)
   10610 		    && (new_phytype != WMPHY_UNKNOWN))
   10611 			doubt_phytype = new_phytype;
   10612 		new_phytype = WMPHY_BM;
   10613 		new_readreg = wm_gmii_bm_readreg;
   10614 		new_writereg = wm_gmii_bm_writereg;
   10615 	} else if (sc->sc_type >= WM_T_PCH) {
   10616 		/* All PCH* use _hv_ */
   10617 		new_readreg = wm_gmii_hv_readreg;
   10618 		new_writereg = wm_gmii_hv_writereg;
   10619 	} else if (sc->sc_type >= WM_T_ICH8) {
   10620 		/* non-82567 ICH8, 9 and 10 */
   10621 		new_readreg = wm_gmii_i82544_readreg;
   10622 		new_writereg = wm_gmii_i82544_writereg;
   10623 	} else if (sc->sc_type >= WM_T_80003) {
   10624 		/* 80003 */
   10625 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10626 		    && (new_phytype != WMPHY_GG82563)
   10627 		    && (new_phytype != WMPHY_UNKNOWN))
   10628 			doubt_phytype = new_phytype;
   10629 		new_phytype = WMPHY_GG82563;
   10630 		new_readreg = wm_gmii_i80003_readreg;
   10631 		new_writereg = wm_gmii_i80003_writereg;
   10632 	} else if (sc->sc_type >= WM_T_I210) {
   10633 		/* I210 and I211 */
   10634 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10635 		    && (new_phytype != WMPHY_I210)
   10636 		    && (new_phytype != WMPHY_UNKNOWN))
   10637 			doubt_phytype = new_phytype;
   10638 		new_phytype = WMPHY_I210;
   10639 		new_readreg = wm_gmii_gs40g_readreg;
   10640 		new_writereg = wm_gmii_gs40g_writereg;
   10641 	} else if (sc->sc_type >= WM_T_82580) {
   10642 		/* 82580, I350 and I354 */
   10643 		new_readreg = wm_gmii_82580_readreg;
   10644 		new_writereg = wm_gmii_82580_writereg;
   10645 	} else if (sc->sc_type >= WM_T_82544) {
   10646 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10647 		new_readreg = wm_gmii_i82544_readreg;
   10648 		new_writereg = wm_gmii_i82544_writereg;
   10649 	} else {
   10650 		new_readreg = wm_gmii_i82543_readreg;
   10651 		new_writereg = wm_gmii_i82543_writereg;
   10652 	}
   10653 
   10654 	if (new_phytype == WMPHY_BM) {
   10655 		/* All BM use _bm_ */
   10656 		new_readreg = wm_gmii_bm_readreg;
   10657 		new_writereg = wm_gmii_bm_writereg;
   10658 	}
   10659 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10660 		/* All PCH* use _hv_ */
   10661 		new_readreg = wm_gmii_hv_readreg;
   10662 		new_writereg = wm_gmii_hv_writereg;
   10663 	}
   10664 
   10665 	/* Diag output */
   10666 	if (dodiag) {
   10667 		if (doubt_phytype != WMPHY_UNKNOWN)
   10668 			aprint_error_dev(dev, "Assumed new PHY type was "
   10669 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10670 			    new_phytype);
   10671 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10672 		    && (sc->sc_phytype != new_phytype))
   10673 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10674 			    "was incorrect. New PHY type = %u\n",
   10675 			    sc->sc_phytype, new_phytype);
   10676 
   10677 		if ((mii->mii_readreg != NULL) &&
   10678 		    (new_phytype == WMPHY_UNKNOWN))
   10679 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10680 
   10681 		if ((mii->mii_readreg != NULL) &&
   10682 		    (mii->mii_readreg != new_readreg))
   10683 			aprint_error_dev(dev, "Previously assumed PHY "
   10684 			    "read/write function was incorrect.\n");
   10685 	}
   10686 
   10687 	/* Update now */
   10688 	sc->sc_phytype = new_phytype;
   10689 	mii->mii_readreg = new_readreg;
   10690 	mii->mii_writereg = new_writereg;
   10691 	if (new_readreg == wm_gmii_hv_readreg) {
   10692 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10693 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10694 	} else if (new_readreg == wm_sgmii_readreg) {
   10695 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10696 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10697 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10698 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10699 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10700 	}
   10701 }
   10702 
   10703 /*
   10704  * wm_get_phy_id_82575:
   10705  *
   10706  * Return PHY ID. Return -1 if it failed.
   10707  */
   10708 static int
   10709 wm_get_phy_id_82575(struct wm_softc *sc)
   10710 {
   10711 	uint32_t reg;
   10712 	int phyid = -1;
   10713 
   10714 	/* XXX */
   10715 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10716 		return -1;
   10717 
   10718 	if (wm_sgmii_uses_mdio(sc)) {
   10719 		switch (sc->sc_type) {
   10720 		case WM_T_82575:
   10721 		case WM_T_82576:
   10722 			reg = CSR_READ(sc, WMREG_MDIC);
   10723 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10724 			break;
   10725 		case WM_T_82580:
   10726 		case WM_T_I350:
   10727 		case WM_T_I354:
   10728 		case WM_T_I210:
   10729 		case WM_T_I211:
   10730 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10731 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10732 			break;
   10733 		default:
   10734 			return -1;
   10735 		}
   10736 	}
   10737 
   10738 	return phyid;
   10739 }
   10740 
   10741 /*
   10742  * wm_gmii_mediainit:
   10743  *
   10744  *	Initialize media for use on 1000BASE-T devices.
   10745  */
   10746 static void
   10747 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10748 {
   10749 	device_t dev = sc->sc_dev;
   10750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10751 	struct mii_data *mii = &sc->sc_mii;
   10752 
   10753 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10754 		device_xname(sc->sc_dev), __func__));
   10755 
   10756 	/* We have GMII. */
   10757 	sc->sc_flags |= WM_F_HAS_MII;
   10758 
   10759 	if (sc->sc_type == WM_T_80003)
   10760 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10761 	else
   10762 		sc->sc_tipg = TIPG_1000T_DFLT;
   10763 
   10764 	/*
   10765 	 * Let the chip set speed/duplex on its own based on
   10766 	 * signals from the PHY.
   10767 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10768 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10769 	 */
   10770 	sc->sc_ctrl |= CTRL_SLU;
   10771 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10772 
   10773 	/* Initialize our media structures and probe the GMII. */
   10774 	mii->mii_ifp = ifp;
   10775 
   10776 	mii->mii_statchg = wm_gmii_statchg;
   10777 
   10778 	/* get PHY control from SMBus to PCIe */
   10779 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10780 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10781 	    || (sc->sc_type == WM_T_PCH_CNP))
   10782 		wm_init_phy_workarounds_pchlan(sc);
   10783 
   10784 	wm_gmii_reset(sc);
   10785 
   10786 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10787 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10788 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10789 
   10790 	/* Setup internal SGMII PHY for SFP */
   10791 	wm_sgmii_sfp_preconfig(sc);
   10792 
   10793 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10794 	    || (sc->sc_type == WM_T_82580)
   10795 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10796 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10797 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10798 			/* Attach only one port */
   10799 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10800 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10801 		} else {
   10802 			int i, id;
   10803 			uint32_t ctrl_ext;
   10804 
   10805 			id = wm_get_phy_id_82575(sc);
   10806 			if (id != -1) {
   10807 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10808 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10809 			}
   10810 			if ((id == -1)
   10811 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10812 				/* Power on sgmii phy if it is disabled */
   10813 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10814 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10815 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10816 				CSR_WRITE_FLUSH(sc);
   10817 				delay(300*1000); /* XXX too long */
   10818 
   10819 				/*
   10820 				 * From 1 to 8.
   10821 				 *
   10822 				 * I2C access fails with I2C register's ERROR
   10823 				 * bit set, so prevent error message while
   10824 				 * scanning.
   10825 				 */
   10826 				sc->phy.no_errprint = true;
   10827 				for (i = 1; i < 8; i++)
   10828 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10829 					    0xffffffff, i, MII_OFFSET_ANY,
   10830 					    MIIF_DOPAUSE);
   10831 				sc->phy.no_errprint = false;
   10832 
   10833 				/* Restore previous sfp cage power state */
   10834 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10835 			}
   10836 		}
   10837 	} else
   10838 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10839 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10840 
   10841 	/*
   10842 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10843 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10844 	 */
   10845 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10846 		|| (sc->sc_type == WM_T_PCH_SPT)
   10847 		|| (sc->sc_type == WM_T_PCH_CNP))
   10848 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10849 		wm_set_mdio_slow_mode_hv(sc);
   10850 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10851 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10852 	}
   10853 
   10854 	/*
   10855 	 * (For ICH8 variants)
   10856 	 * If PHY detection failed, use BM's r/w function and retry.
   10857 	 */
   10858 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10859 		/* if failed, retry with *_bm_* */
   10860 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10861 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10862 		    sc->sc_phytype);
   10863 		sc->sc_phytype = WMPHY_BM;
   10864 		mii->mii_readreg = wm_gmii_bm_readreg;
   10865 		mii->mii_writereg = wm_gmii_bm_writereg;
   10866 
   10867 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10868 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10869 	}
   10870 
   10871 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10872 		/* Any PHY wasn't find */
   10873 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10874 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10875 		sc->sc_phytype = WMPHY_NONE;
   10876 	} else {
   10877 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10878 
   10879 		/*
   10880 		 * PHY Found! Check PHY type again by the second call of
   10881 		 * wm_gmii_setup_phytype.
   10882 		 */
   10883 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10884 		    child->mii_mpd_model);
   10885 
   10886 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10887 	}
   10888 }
   10889 
   10890 /*
   10891  * wm_gmii_mediachange:	[ifmedia interface function]
   10892  *
   10893  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10894  */
   10895 static int
   10896 wm_gmii_mediachange(struct ifnet *ifp)
   10897 {
   10898 	struct wm_softc *sc = ifp->if_softc;
   10899 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10900 	uint32_t reg;
   10901 	int rc;
   10902 
   10903 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10904 		device_xname(sc->sc_dev), __func__));
   10905 	if ((ifp->if_flags & IFF_UP) == 0)
   10906 		return 0;
   10907 
   10908 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10909 	if ((sc->sc_type == WM_T_82580)
   10910 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10911 	    || (sc->sc_type == WM_T_I211)) {
   10912 		reg = CSR_READ(sc, WMREG_PHPM);
   10913 		reg &= ~PHPM_GO_LINK_D;
   10914 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10915 	}
   10916 
   10917 	/* Disable D0 LPLU. */
   10918 	wm_lplu_d0_disable(sc);
   10919 
   10920 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10921 	sc->sc_ctrl |= CTRL_SLU;
   10922 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10923 	    || (sc->sc_type > WM_T_82543)) {
   10924 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10925 	} else {
   10926 		sc->sc_ctrl &= ~CTRL_ASDE;
   10927 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10928 		if (ife->ifm_media & IFM_FDX)
   10929 			sc->sc_ctrl |= CTRL_FD;
   10930 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10931 		case IFM_10_T:
   10932 			sc->sc_ctrl |= CTRL_SPEED_10;
   10933 			break;
   10934 		case IFM_100_TX:
   10935 			sc->sc_ctrl |= CTRL_SPEED_100;
   10936 			break;
   10937 		case IFM_1000_T:
   10938 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10939 			break;
   10940 		case IFM_NONE:
   10941 			/* There is no specific setting for IFM_NONE */
   10942 			break;
   10943 		default:
   10944 			panic("wm_gmii_mediachange: bad media 0x%x",
   10945 			    ife->ifm_media);
   10946 		}
   10947 	}
   10948 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10949 	CSR_WRITE_FLUSH(sc);
   10950 
   10951 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10952 		wm_serdes_mediachange(ifp);
   10953 
   10954 	if (sc->sc_type <= WM_T_82543)
   10955 		wm_gmii_reset(sc);
   10956 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10957 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10958 		/* allow time for SFP cage time to power up phy */
   10959 		delay(300 * 1000);
   10960 		wm_gmii_reset(sc);
   10961 	}
   10962 
   10963 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10964 		return 0;
   10965 	return rc;
   10966 }
   10967 
   10968 /*
   10969  * wm_gmii_mediastatus:	[ifmedia interface function]
   10970  *
   10971  *	Get the current interface media status on a 1000BASE-T device.
   10972  */
   10973 static void
   10974 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10975 {
   10976 	struct wm_softc *sc = ifp->if_softc;
   10977 
   10978 	ether_mediastatus(ifp, ifmr);
   10979 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10980 	    | sc->sc_flowflags;
   10981 }
   10982 
   10983 #define	MDI_IO		CTRL_SWDPIN(2)
   10984 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10985 #define	MDI_CLK		CTRL_SWDPIN(3)
   10986 
   10987 static void
   10988 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10989 {
   10990 	uint32_t i, v;
   10991 
   10992 	v = CSR_READ(sc, WMREG_CTRL);
   10993 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10994 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10995 
   10996 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10997 		if (data & i)
   10998 			v |= MDI_IO;
   10999 		else
   11000 			v &= ~MDI_IO;
   11001 		CSR_WRITE(sc, WMREG_CTRL, v);
   11002 		CSR_WRITE_FLUSH(sc);
   11003 		delay(10);
   11004 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11005 		CSR_WRITE_FLUSH(sc);
   11006 		delay(10);
   11007 		CSR_WRITE(sc, WMREG_CTRL, v);
   11008 		CSR_WRITE_FLUSH(sc);
   11009 		delay(10);
   11010 	}
   11011 }
   11012 
   11013 static uint16_t
   11014 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11015 {
   11016 	uint32_t v, i;
   11017 	uint16_t data = 0;
   11018 
   11019 	v = CSR_READ(sc, WMREG_CTRL);
   11020 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11021 	v |= CTRL_SWDPIO(3);
   11022 
   11023 	CSR_WRITE(sc, WMREG_CTRL, v);
   11024 	CSR_WRITE_FLUSH(sc);
   11025 	delay(10);
   11026 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11027 	CSR_WRITE_FLUSH(sc);
   11028 	delay(10);
   11029 	CSR_WRITE(sc, WMREG_CTRL, v);
   11030 	CSR_WRITE_FLUSH(sc);
   11031 	delay(10);
   11032 
   11033 	for (i = 0; i < 16; i++) {
   11034 		data <<= 1;
   11035 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11036 		CSR_WRITE_FLUSH(sc);
   11037 		delay(10);
   11038 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11039 			data |= 1;
   11040 		CSR_WRITE(sc, WMREG_CTRL, v);
   11041 		CSR_WRITE_FLUSH(sc);
   11042 		delay(10);
   11043 	}
   11044 
   11045 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11046 	CSR_WRITE_FLUSH(sc);
   11047 	delay(10);
   11048 	CSR_WRITE(sc, WMREG_CTRL, v);
   11049 	CSR_WRITE_FLUSH(sc);
   11050 	delay(10);
   11051 
   11052 	return data;
   11053 }
   11054 
   11055 #undef MDI_IO
   11056 #undef MDI_DIR
   11057 #undef MDI_CLK
   11058 
   11059 /*
   11060  * wm_gmii_i82543_readreg:	[mii interface function]
   11061  *
   11062  *	Read a PHY register on the GMII (i82543 version).
   11063  */
   11064 static int
   11065 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11066 {
   11067 	struct wm_softc *sc = device_private(dev);
   11068 
   11069 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11070 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11071 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11072 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11073 
   11074 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11075 		device_xname(dev), phy, reg, *val));
   11076 
   11077 	return 0;
   11078 }
   11079 
   11080 /*
   11081  * wm_gmii_i82543_writereg:	[mii interface function]
   11082  *
   11083  *	Write a PHY register on the GMII (i82543 version).
   11084  */
   11085 static int
   11086 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11087 {
   11088 	struct wm_softc *sc = device_private(dev);
   11089 
   11090 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11091 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11092 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11093 	    (MII_COMMAND_START << 30), 32);
   11094 
   11095 	return 0;
   11096 }
   11097 
   11098 /*
   11099  * wm_gmii_mdic_readreg:	[mii interface function]
   11100  *
   11101  *	Read a PHY register on the GMII.
   11102  */
   11103 static int
   11104 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11105 {
   11106 	struct wm_softc *sc = device_private(dev);
   11107 	uint32_t mdic = 0;
   11108 	int i;
   11109 
   11110 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11111 	    && (reg > MII_ADDRMASK)) {
   11112 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11113 		    __func__, sc->sc_phytype, reg);
   11114 		reg &= MII_ADDRMASK;
   11115 	}
   11116 
   11117 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11118 	    MDIC_REGADD(reg));
   11119 
   11120 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11121 		delay(50);
   11122 		mdic = CSR_READ(sc, WMREG_MDIC);
   11123 		if (mdic & MDIC_READY)
   11124 			break;
   11125 	}
   11126 
   11127 	if ((mdic & MDIC_READY) == 0) {
   11128 		DPRINTF(sc, WM_DEBUG_GMII,
   11129 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11130 			device_xname(dev), phy, reg));
   11131 		return ETIMEDOUT;
   11132 	} else if (mdic & MDIC_E) {
   11133 		/* This is normal if no PHY is present. */
   11134 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11135 			device_xname(sc->sc_dev), phy, reg));
   11136 		return -1;
   11137 	} else
   11138 		*val = MDIC_DATA(mdic);
   11139 
   11140 	/*
   11141 	 * Allow some time after each MDIC transaction to avoid
   11142 	 * reading duplicate data in the next MDIC transaction.
   11143 	 */
   11144 	if (sc->sc_type == WM_T_PCH2)
   11145 		delay(100);
   11146 
   11147 	return 0;
   11148 }
   11149 
   11150 /*
   11151  * wm_gmii_mdic_writereg:	[mii interface function]
   11152  *
   11153  *	Write a PHY register on the GMII.
   11154  */
   11155 static int
   11156 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11157 {
   11158 	struct wm_softc *sc = device_private(dev);
   11159 	uint32_t mdic = 0;
   11160 	int i;
   11161 
   11162 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11163 	    && (reg > MII_ADDRMASK)) {
   11164 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11165 		    __func__, sc->sc_phytype, reg);
   11166 		reg &= MII_ADDRMASK;
   11167 	}
   11168 
   11169 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11170 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11171 
   11172 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11173 		delay(50);
   11174 		mdic = CSR_READ(sc, WMREG_MDIC);
   11175 		if (mdic & MDIC_READY)
   11176 			break;
   11177 	}
   11178 
   11179 	if ((mdic & MDIC_READY) == 0) {
   11180 		DPRINTF(sc, WM_DEBUG_GMII,
   11181 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11182 			device_xname(dev), phy, reg));
   11183 		return ETIMEDOUT;
   11184 	} else if (mdic & MDIC_E) {
   11185 		DPRINTF(sc, WM_DEBUG_GMII,
   11186 		    ("%s: MDIC write error: phy %d reg %d\n",
   11187 			device_xname(dev), phy, reg));
   11188 		return -1;
   11189 	}
   11190 
   11191 	/*
   11192 	 * Allow some time after each MDIC transaction to avoid
   11193 	 * reading duplicate data in the next MDIC transaction.
   11194 	 */
   11195 	if (sc->sc_type == WM_T_PCH2)
   11196 		delay(100);
   11197 
   11198 	return 0;
   11199 }
   11200 
   11201 /*
   11202  * wm_gmii_i82544_readreg:	[mii interface function]
   11203  *
   11204  *	Read a PHY register on the GMII.
   11205  */
   11206 static int
   11207 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11208 {
   11209 	struct wm_softc *sc = device_private(dev);
   11210 	int rv;
   11211 
   11212 	if (sc->phy.acquire(sc)) {
   11213 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11214 		return -1;
   11215 	}
   11216 
   11217 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11218 
   11219 	sc->phy.release(sc);
   11220 
   11221 	return rv;
   11222 }
   11223 
   11224 static int
   11225 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11226 {
   11227 	struct wm_softc *sc = device_private(dev);
   11228 	int rv;
   11229 
   11230 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11231 		switch (sc->sc_phytype) {
   11232 		case WMPHY_IGP:
   11233 		case WMPHY_IGP_2:
   11234 		case WMPHY_IGP_3:
   11235 			rv = wm_gmii_mdic_writereg(dev, phy,
   11236 			    IGPHY_PAGE_SELECT, reg);
   11237 			if (rv != 0)
   11238 				return rv;
   11239 			break;
   11240 		default:
   11241 #ifdef WM_DEBUG
   11242 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11243 			    __func__, sc->sc_phytype, reg);
   11244 #endif
   11245 			break;
   11246 		}
   11247 	}
   11248 
   11249 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11250 }
   11251 
   11252 /*
   11253  * wm_gmii_i82544_writereg:	[mii interface function]
   11254  *
   11255  *	Write a PHY register on the GMII.
   11256  */
   11257 static int
   11258 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11259 {
   11260 	struct wm_softc *sc = device_private(dev);
   11261 	int rv;
   11262 
   11263 	if (sc->phy.acquire(sc)) {
   11264 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11265 		return -1;
   11266 	}
   11267 
   11268 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11269 	sc->phy.release(sc);
   11270 
   11271 	return rv;
   11272 }
   11273 
   11274 static int
   11275 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11276 {
   11277 	struct wm_softc *sc = device_private(dev);
   11278 	int rv;
   11279 
   11280 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11281 		switch (sc->sc_phytype) {
   11282 		case WMPHY_IGP:
   11283 		case WMPHY_IGP_2:
   11284 		case WMPHY_IGP_3:
   11285 			rv = wm_gmii_mdic_writereg(dev, phy,
   11286 			    IGPHY_PAGE_SELECT, reg);
   11287 			if (rv != 0)
   11288 				return rv;
   11289 			break;
   11290 		default:
   11291 #ifdef WM_DEBUG
   11292 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11293 			    __func__, sc->sc_phytype, reg);
   11294 #endif
   11295 			break;
   11296 		}
   11297 	}
   11298 
   11299 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11300 }
   11301 
   11302 /*
   11303  * wm_gmii_i80003_readreg:	[mii interface function]
   11304  *
   11305  *	Read a PHY register on the kumeran
   11306  * This could be handled by the PHY layer if we didn't have to lock the
   11307  * resource ...
   11308  */
   11309 static int
   11310 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11311 {
   11312 	struct wm_softc *sc = device_private(dev);
   11313 	int page_select;
   11314 	uint16_t temp, temp2;
   11315 	int rv = 0;
   11316 
   11317 	if (phy != 1) /* Only one PHY on kumeran bus */
   11318 		return -1;
   11319 
   11320 	if (sc->phy.acquire(sc)) {
   11321 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11322 		return -1;
   11323 	}
   11324 
   11325 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11326 		page_select = GG82563_PHY_PAGE_SELECT;
   11327 	else {
   11328 		/*
   11329 		 * Use Alternative Page Select register to access registers
   11330 		 * 30 and 31.
   11331 		 */
   11332 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11333 	}
   11334 	temp = reg >> GG82563_PAGE_SHIFT;
   11335 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11336 		goto out;
   11337 
   11338 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11339 		/*
   11340 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11341 		 * register.
   11342 		 */
   11343 		delay(200);
   11344 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11345 		if ((rv != 0) || (temp2 != temp)) {
   11346 			device_printf(dev, "%s failed\n", __func__);
   11347 			rv = -1;
   11348 			goto out;
   11349 		}
   11350 		delay(200);
   11351 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11352 		delay(200);
   11353 	} else
   11354 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11355 
   11356 out:
   11357 	sc->phy.release(sc);
   11358 	return rv;
   11359 }
   11360 
   11361 /*
   11362  * wm_gmii_i80003_writereg:	[mii interface function]
   11363  *
   11364  *	Write a PHY register on the kumeran.
   11365  * This could be handled by the PHY layer if we didn't have to lock the
   11366  * resource ...
   11367  */
   11368 static int
   11369 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11370 {
   11371 	struct wm_softc *sc = device_private(dev);
   11372 	int page_select, rv;
   11373 	uint16_t temp, temp2;
   11374 
   11375 	if (phy != 1) /* Only one PHY on kumeran bus */
   11376 		return -1;
   11377 
   11378 	if (sc->phy.acquire(sc)) {
   11379 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11380 		return -1;
   11381 	}
   11382 
   11383 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11384 		page_select = GG82563_PHY_PAGE_SELECT;
   11385 	else {
   11386 		/*
   11387 		 * Use Alternative Page Select register to access registers
   11388 		 * 30 and 31.
   11389 		 */
   11390 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11391 	}
   11392 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11393 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11394 		goto out;
   11395 
   11396 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11397 		/*
   11398 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11399 		 * register.
   11400 		 */
   11401 		delay(200);
   11402 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11403 		if ((rv != 0) || (temp2 != temp)) {
   11404 			device_printf(dev, "%s failed\n", __func__);
   11405 			rv = -1;
   11406 			goto out;
   11407 		}
   11408 		delay(200);
   11409 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11410 		delay(200);
   11411 	} else
   11412 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11413 
   11414 out:
   11415 	sc->phy.release(sc);
   11416 	return rv;
   11417 }
   11418 
   11419 /*
   11420  * wm_gmii_bm_readreg:	[mii interface function]
   11421  *
   11422  *	Read a PHY register on the kumeran
   11423  * This could be handled by the PHY layer if we didn't have to lock the
   11424  * resource ...
   11425  */
   11426 static int
   11427 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11428 {
   11429 	struct wm_softc *sc = device_private(dev);
   11430 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11431 	int rv;
   11432 
   11433 	if (sc->phy.acquire(sc)) {
   11434 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11435 		return -1;
   11436 	}
   11437 
   11438 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11439 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11440 		    || (reg == 31)) ? 1 : phy;
   11441 	/* Page 800 works differently than the rest so it has its own func */
   11442 	if (page == BM_WUC_PAGE) {
   11443 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11444 		goto release;
   11445 	}
   11446 
   11447 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11448 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11449 		    && (sc->sc_type != WM_T_82583))
   11450 			rv = wm_gmii_mdic_writereg(dev, phy,
   11451 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11452 		else
   11453 			rv = wm_gmii_mdic_writereg(dev, phy,
   11454 			    BME1000_PHY_PAGE_SELECT, page);
   11455 		if (rv != 0)
   11456 			goto release;
   11457 	}
   11458 
   11459 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11460 
   11461 release:
   11462 	sc->phy.release(sc);
   11463 	return rv;
   11464 }
   11465 
   11466 /*
   11467  * wm_gmii_bm_writereg:	[mii interface function]
   11468  *
   11469  *	Write a PHY register on the kumeran.
   11470  * This could be handled by the PHY layer if we didn't have to lock the
   11471  * resource ...
   11472  */
   11473 static int
   11474 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11475 {
   11476 	struct wm_softc *sc = device_private(dev);
   11477 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11478 	int rv;
   11479 
   11480 	if (sc->phy.acquire(sc)) {
   11481 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11482 		return -1;
   11483 	}
   11484 
   11485 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11486 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11487 		    || (reg == 31)) ? 1 : phy;
   11488 	/* Page 800 works differently than the rest so it has its own func */
   11489 	if (page == BM_WUC_PAGE) {
   11490 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11491 		goto release;
   11492 	}
   11493 
   11494 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11495 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11496 		    && (sc->sc_type != WM_T_82583))
   11497 			rv = wm_gmii_mdic_writereg(dev, phy,
   11498 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11499 		else
   11500 			rv = wm_gmii_mdic_writereg(dev, phy,
   11501 			    BME1000_PHY_PAGE_SELECT, page);
   11502 		if (rv != 0)
   11503 			goto release;
   11504 	}
   11505 
   11506 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11507 
   11508 release:
   11509 	sc->phy.release(sc);
   11510 	return rv;
   11511 }
   11512 
   11513 /*
   11514  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11515  *  @dev: pointer to the HW structure
   11516  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11517  *
   11518  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11519  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11520  */
   11521 static int
   11522 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11523 {
   11524 #ifdef WM_DEBUG
   11525 	struct wm_softc *sc = device_private(dev);
   11526 #endif
   11527 	uint16_t temp;
   11528 	int rv;
   11529 
   11530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11531 		device_xname(dev), __func__));
   11532 
   11533 	if (!phy_regp)
   11534 		return -1;
   11535 
   11536 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11537 
   11538 	/* Select Port Control Registers page */
   11539 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11540 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11541 	if (rv != 0)
   11542 		return rv;
   11543 
   11544 	/* Read WUCE and save it */
   11545 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11546 	if (rv != 0)
   11547 		return rv;
   11548 
   11549 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11550 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11551 	 */
   11552 	temp = *phy_regp;
   11553 	temp |= BM_WUC_ENABLE_BIT;
   11554 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11555 
   11556 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11557 		return rv;
   11558 
   11559 	/* Select Host Wakeup Registers page - caller now able to write
   11560 	 * registers on the Wakeup registers page
   11561 	 */
   11562 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11563 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11564 }
   11565 
   11566 /*
   11567  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11568  *  @dev: pointer to the HW structure
   11569  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11570  *
   11571  *  Restore BM_WUC_ENABLE_REG to its original value.
   11572  *
   11573  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11574  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11575  *  caller.
   11576  */
   11577 static int
   11578 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11579 {
   11580 #ifdef WM_DEBUG
   11581 	struct wm_softc *sc = device_private(dev);
   11582 #endif
   11583 
   11584 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11585 		device_xname(dev), __func__));
   11586 
   11587 	if (!phy_regp)
   11588 		return -1;
   11589 
   11590 	/* Select Port Control Registers page */
   11591 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11592 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11593 
   11594 	/* Restore 769.17 to its original value */
   11595 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11596 
   11597 	return 0;
   11598 }
   11599 
   11600 /*
   11601  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11602  *  @sc: pointer to the HW structure
   11603  *  @offset: register offset to be read or written
   11604  *  @val: pointer to the data to read or write
   11605  *  @rd: determines if operation is read or write
   11606  *  @page_set: BM_WUC_PAGE already set and access enabled
   11607  *
   11608  *  Read the PHY register at offset and store the retrieved information in
   11609  *  data, or write data to PHY register at offset.  Note the procedure to
   11610  *  access the PHY wakeup registers is different than reading the other PHY
   11611  *  registers. It works as such:
   11612  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11613  *  2) Set page to 800 for host (801 if we were manageability)
   11614  *  3) Write the address using the address opcode (0x11)
   11615  *  4) Read or write the data using the data opcode (0x12)
   11616  *  5) Restore 769.17.2 to its original value
   11617  *
   11618  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11619  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11620  *
   11621  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11622  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11623  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11624  */
   11625 static int
   11626 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11627 	bool page_set)
   11628 {
   11629 	struct wm_softc *sc = device_private(dev);
   11630 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11631 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11632 	uint16_t wuce;
   11633 	int rv = 0;
   11634 
   11635 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11636 		device_xname(dev), __func__));
   11637 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11638 	if ((sc->sc_type == WM_T_PCH)
   11639 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11640 		device_printf(dev,
   11641 		    "Attempting to access page %d while gig enabled.\n", page);
   11642 	}
   11643 
   11644 	if (!page_set) {
   11645 		/* Enable access to PHY wakeup registers */
   11646 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11647 		if (rv != 0) {
   11648 			device_printf(dev,
   11649 			    "%s: Could not enable PHY wakeup reg access\n",
   11650 			    __func__);
   11651 			return rv;
   11652 		}
   11653 	}
   11654 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11655 		device_xname(sc->sc_dev), __func__, page, regnum));
   11656 
   11657 	/*
   11658 	 * 2) Access PHY wakeup register.
   11659 	 * See wm_access_phy_wakeup_reg_bm.
   11660 	 */
   11661 
   11662 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11663 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11664 	if (rv != 0)
   11665 		return rv;
   11666 
   11667 	if (rd) {
   11668 		/* Read the Wakeup register page value using opcode 0x12 */
   11669 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11670 	} else {
   11671 		/* Write the Wakeup register page value using opcode 0x12 */
   11672 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11673 	}
   11674 	if (rv != 0)
   11675 		return rv;
   11676 
   11677 	if (!page_set)
   11678 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11679 
   11680 	return rv;
   11681 }
   11682 
   11683 /*
   11684  * wm_gmii_hv_readreg:	[mii interface function]
   11685  *
   11686  *	Read a PHY register on the kumeran
   11687  * This could be handled by the PHY layer if we didn't have to lock the
   11688  * resource ...
   11689  */
   11690 static int
   11691 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11692 {
   11693 	struct wm_softc *sc = device_private(dev);
   11694 	int rv;
   11695 
   11696 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11697 		device_xname(dev), __func__));
   11698 	if (sc->phy.acquire(sc)) {
   11699 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11700 		return -1;
   11701 	}
   11702 
   11703 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11704 	sc->phy.release(sc);
   11705 	return rv;
   11706 }
   11707 
   11708 static int
   11709 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11710 {
   11711 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11712 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11713 	int rv;
   11714 
   11715 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11716 
   11717 	/* Page 800 works differently than the rest so it has its own func */
   11718 	if (page == BM_WUC_PAGE)
   11719 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11720 
   11721 	/*
   11722 	 * Lower than page 768 works differently than the rest so it has its
   11723 	 * own func
   11724 	 */
   11725 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11726 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11727 		return -1;
   11728 	}
   11729 
   11730 	/*
   11731 	 * XXX I21[789] documents say that the SMBus Address register is at
   11732 	 * PHY address 01, Page 0 (not 768), Register 26.
   11733 	 */
   11734 	if (page == HV_INTC_FC_PAGE_START)
   11735 		page = 0;
   11736 
   11737 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11738 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11739 		    page << BME1000_PAGE_SHIFT);
   11740 		if (rv != 0)
   11741 			return rv;
   11742 	}
   11743 
   11744 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11745 }
   11746 
   11747 /*
   11748  * wm_gmii_hv_writereg:	[mii interface function]
   11749  *
   11750  *	Write a PHY register on the kumeran.
   11751  * This could be handled by the PHY layer if we didn't have to lock the
   11752  * resource ...
   11753  */
   11754 static int
   11755 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11756 {
   11757 	struct wm_softc *sc = device_private(dev);
   11758 	int rv;
   11759 
   11760 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11761 		device_xname(dev), __func__));
   11762 
   11763 	if (sc->phy.acquire(sc)) {
   11764 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11765 		return -1;
   11766 	}
   11767 
   11768 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11769 	sc->phy.release(sc);
   11770 
   11771 	return rv;
   11772 }
   11773 
   11774 static int
   11775 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11776 {
   11777 	struct wm_softc *sc = device_private(dev);
   11778 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11779 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11780 	int rv;
   11781 
   11782 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11783 
   11784 	/* Page 800 works differently than the rest so it has its own func */
   11785 	if (page == BM_WUC_PAGE)
   11786 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11787 		    false);
   11788 
   11789 	/*
   11790 	 * Lower than page 768 works differently than the rest so it has its
   11791 	 * own func
   11792 	 */
   11793 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11794 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11795 		return -1;
   11796 	}
   11797 
   11798 	{
   11799 		/*
   11800 		 * XXX I21[789] documents say that the SMBus Address register
   11801 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11802 		 */
   11803 		if (page == HV_INTC_FC_PAGE_START)
   11804 			page = 0;
   11805 
   11806 		/*
   11807 		 * XXX Workaround MDIO accesses being disabled after entering
   11808 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11809 		 * register is set)
   11810 		 */
   11811 		if (sc->sc_phytype == WMPHY_82578) {
   11812 			struct mii_softc *child;
   11813 
   11814 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11815 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11816 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11817 			    && ((val & (1 << 11)) != 0)) {
   11818 				device_printf(dev, "XXX need workaround\n");
   11819 			}
   11820 		}
   11821 
   11822 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11823 			rv = wm_gmii_mdic_writereg(dev, 1,
   11824 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11825 			if (rv != 0)
   11826 				return rv;
   11827 		}
   11828 	}
   11829 
   11830 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11831 }
   11832 
   11833 /*
   11834  * wm_gmii_82580_readreg:	[mii interface function]
   11835  *
   11836  *	Read a PHY register on the 82580 and I350.
   11837  * This could be handled by the PHY layer if we didn't have to lock the
   11838  * resource ...
   11839  */
   11840 static int
   11841 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11842 {
   11843 	struct wm_softc *sc = device_private(dev);
   11844 	int rv;
   11845 
   11846 	if (sc->phy.acquire(sc) != 0) {
   11847 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11848 		return -1;
   11849 	}
   11850 
   11851 #ifdef DIAGNOSTIC
   11852 	if (reg > MII_ADDRMASK) {
   11853 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11854 		    __func__, sc->sc_phytype, reg);
   11855 		reg &= MII_ADDRMASK;
   11856 	}
   11857 #endif
   11858 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11859 
   11860 	sc->phy.release(sc);
   11861 	return rv;
   11862 }
   11863 
   11864 /*
   11865  * wm_gmii_82580_writereg:	[mii interface function]
   11866  *
   11867  *	Write a PHY register on the 82580 and I350.
   11868  * This could be handled by the PHY layer if we didn't have to lock the
   11869  * resource ...
   11870  */
   11871 static int
   11872 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11873 {
   11874 	struct wm_softc *sc = device_private(dev);
   11875 	int rv;
   11876 
   11877 	if (sc->phy.acquire(sc) != 0) {
   11878 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11879 		return -1;
   11880 	}
   11881 
   11882 #ifdef DIAGNOSTIC
   11883 	if (reg > MII_ADDRMASK) {
   11884 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11885 		    __func__, sc->sc_phytype, reg);
   11886 		reg &= MII_ADDRMASK;
   11887 	}
   11888 #endif
   11889 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11890 
   11891 	sc->phy.release(sc);
   11892 	return rv;
   11893 }
   11894 
   11895 /*
   11896  * wm_gmii_gs40g_readreg:	[mii interface function]
   11897  *
   11898  *	Read a PHY register on the I2100 and I211.
   11899  * This could be handled by the PHY layer if we didn't have to lock the
   11900  * resource ...
   11901  */
   11902 static int
   11903 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11904 {
   11905 	struct wm_softc *sc = device_private(dev);
   11906 	int page, offset;
   11907 	int rv;
   11908 
   11909 	/* Acquire semaphore */
   11910 	if (sc->phy.acquire(sc)) {
   11911 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11912 		return -1;
   11913 	}
   11914 
   11915 	/* Page select */
   11916 	page = reg >> GS40G_PAGE_SHIFT;
   11917 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11918 	if (rv != 0)
   11919 		goto release;
   11920 
   11921 	/* Read reg */
   11922 	offset = reg & GS40G_OFFSET_MASK;
   11923 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11924 
   11925 release:
   11926 	sc->phy.release(sc);
   11927 	return rv;
   11928 }
   11929 
   11930 /*
   11931  * wm_gmii_gs40g_writereg:	[mii interface function]
   11932  *
   11933  *	Write a PHY register on the I210 and I211.
   11934  * This could be handled by the PHY layer if we didn't have to lock the
   11935  * resource ...
   11936  */
   11937 static int
   11938 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11939 {
   11940 	struct wm_softc *sc = device_private(dev);
   11941 	uint16_t page;
   11942 	int offset, rv;
   11943 
   11944 	/* Acquire semaphore */
   11945 	if (sc->phy.acquire(sc)) {
   11946 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11947 		return -1;
   11948 	}
   11949 
   11950 	/* Page select */
   11951 	page = reg >> GS40G_PAGE_SHIFT;
   11952 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11953 	if (rv != 0)
   11954 		goto release;
   11955 
   11956 	/* Write reg */
   11957 	offset = reg & GS40G_OFFSET_MASK;
   11958 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11959 
   11960 release:
   11961 	/* Release semaphore */
   11962 	sc->phy.release(sc);
   11963 	return rv;
   11964 }
   11965 
   11966 /*
   11967  * wm_gmii_statchg:	[mii interface function]
   11968  *
   11969  *	Callback from MII layer when media changes.
   11970  */
   11971 static void
   11972 wm_gmii_statchg(struct ifnet *ifp)
   11973 {
   11974 	struct wm_softc *sc = ifp->if_softc;
   11975 	struct mii_data *mii = &sc->sc_mii;
   11976 
   11977 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11978 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11979 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11980 
   11981 	/* Get flow control negotiation result. */
   11982 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11983 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11984 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11985 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11986 	}
   11987 
   11988 	if (sc->sc_flowflags & IFM_FLOW) {
   11989 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11990 			sc->sc_ctrl |= CTRL_TFCE;
   11991 			sc->sc_fcrtl |= FCRTL_XONE;
   11992 		}
   11993 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11994 			sc->sc_ctrl |= CTRL_RFCE;
   11995 	}
   11996 
   11997 	if (mii->mii_media_active & IFM_FDX) {
   11998 		DPRINTF(sc, WM_DEBUG_LINK,
   11999 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12000 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12001 	} else {
   12002 		DPRINTF(sc, WM_DEBUG_LINK,
   12003 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12004 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12005 	}
   12006 
   12007 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12008 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12009 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12010 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12011 	if (sc->sc_type == WM_T_80003) {
   12012 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12013 		case IFM_1000_T:
   12014 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12015 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12016 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12017 			break;
   12018 		default:
   12019 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12020 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12021 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12022 			break;
   12023 		}
   12024 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12025 	}
   12026 }
   12027 
   12028 /* kumeran related (80003, ICH* and PCH*) */
   12029 
   12030 /*
   12031  * wm_kmrn_readreg:
   12032  *
   12033  *	Read a kumeran register
   12034  */
   12035 static int
   12036 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12037 {
   12038 	int rv;
   12039 
   12040 	if (sc->sc_type == WM_T_80003)
   12041 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12042 	else
   12043 		rv = sc->phy.acquire(sc);
   12044 	if (rv != 0) {
   12045 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12046 		    __func__);
   12047 		return rv;
   12048 	}
   12049 
   12050 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12051 
   12052 	if (sc->sc_type == WM_T_80003)
   12053 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12054 	else
   12055 		sc->phy.release(sc);
   12056 
   12057 	return rv;
   12058 }
   12059 
   12060 static int
   12061 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12062 {
   12063 
   12064 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12065 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12066 	    KUMCTRLSTA_REN);
   12067 	CSR_WRITE_FLUSH(sc);
   12068 	delay(2);
   12069 
   12070 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12071 
   12072 	return 0;
   12073 }
   12074 
   12075 /*
   12076  * wm_kmrn_writereg:
   12077  *
   12078  *	Write a kumeran register
   12079  */
   12080 static int
   12081 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12082 {
   12083 	int rv;
   12084 
   12085 	if (sc->sc_type == WM_T_80003)
   12086 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12087 	else
   12088 		rv = sc->phy.acquire(sc);
   12089 	if (rv != 0) {
   12090 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12091 		    __func__);
   12092 		return rv;
   12093 	}
   12094 
   12095 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12096 
   12097 	if (sc->sc_type == WM_T_80003)
   12098 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12099 	else
   12100 		sc->phy.release(sc);
   12101 
   12102 	return rv;
   12103 }
   12104 
   12105 static int
   12106 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12107 {
   12108 
   12109 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12110 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12111 
   12112 	return 0;
   12113 }
   12114 
   12115 /*
   12116  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12117  * This access method is different from IEEE MMD.
   12118  */
   12119 static int
   12120 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12121 {
   12122 	struct wm_softc *sc = device_private(dev);
   12123 	int rv;
   12124 
   12125 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12126 	if (rv != 0)
   12127 		return rv;
   12128 
   12129 	if (rd)
   12130 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12131 	else
   12132 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12133 	return rv;
   12134 }
   12135 
   12136 static int
   12137 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12138 {
   12139 
   12140 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12141 }
   12142 
   12143 static int
   12144 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12145 {
   12146 
   12147 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12148 }
   12149 
   12150 /* SGMII related */
   12151 
   12152 /*
   12153  * wm_sgmii_uses_mdio
   12154  *
   12155  * Check whether the transaction is to the internal PHY or the external
   12156  * MDIO interface. Return true if it's MDIO.
   12157  */
   12158 static bool
   12159 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12160 {
   12161 	uint32_t reg;
   12162 	bool ismdio = false;
   12163 
   12164 	switch (sc->sc_type) {
   12165 	case WM_T_82575:
   12166 	case WM_T_82576:
   12167 		reg = CSR_READ(sc, WMREG_MDIC);
   12168 		ismdio = ((reg & MDIC_DEST) != 0);
   12169 		break;
   12170 	case WM_T_82580:
   12171 	case WM_T_I350:
   12172 	case WM_T_I354:
   12173 	case WM_T_I210:
   12174 	case WM_T_I211:
   12175 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12176 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12177 		break;
   12178 	default:
   12179 		break;
   12180 	}
   12181 
   12182 	return ismdio;
   12183 }
   12184 
   12185 /* Setup internal SGMII PHY for SFP */
   12186 static void
   12187 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12188 {
   12189 	uint16_t id1, id2, phyreg;
   12190 	int i, rv;
   12191 
   12192 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12193 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12194 		return;
   12195 
   12196 	for (i = 0; i < MII_NPHY; i++) {
   12197 		sc->phy.no_errprint = true;
   12198 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12199 		if (rv != 0)
   12200 			continue;
   12201 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12202 		if (rv != 0)
   12203 			continue;
   12204 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12205 			continue;
   12206 		sc->phy.no_errprint = false;
   12207 
   12208 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12209 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12210 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12211 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12212 		break;
   12213 	}
   12214 
   12215 }
   12216 
   12217 /*
   12218  * wm_sgmii_readreg:	[mii interface function]
   12219  *
   12220  *	Read a PHY register on the SGMII
   12221  * This could be handled by the PHY layer if we didn't have to lock the
   12222  * resource ...
   12223  */
   12224 static int
   12225 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12226 {
   12227 	struct wm_softc *sc = device_private(dev);
   12228 	int rv;
   12229 
   12230 	if (sc->phy.acquire(sc)) {
   12231 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12232 		return -1;
   12233 	}
   12234 
   12235 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12236 
   12237 	sc->phy.release(sc);
   12238 	return rv;
   12239 }
   12240 
   12241 static int
   12242 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12243 {
   12244 	struct wm_softc *sc = device_private(dev);
   12245 	uint32_t i2ccmd;
   12246 	int i, rv = 0;
   12247 
   12248 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12249 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12250 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12251 
   12252 	/* Poll the ready bit */
   12253 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12254 		delay(50);
   12255 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12256 		if (i2ccmd & I2CCMD_READY)
   12257 			break;
   12258 	}
   12259 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12260 		device_printf(dev, "I2CCMD Read did not complete\n");
   12261 		rv = ETIMEDOUT;
   12262 	}
   12263 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12264 		if (!sc->phy.no_errprint)
   12265 			device_printf(dev, "I2CCMD Error bit set\n");
   12266 		rv = EIO;
   12267 	}
   12268 
   12269 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12270 
   12271 	return rv;
   12272 }
   12273 
   12274 /*
   12275  * wm_sgmii_writereg:	[mii interface function]
   12276  *
   12277  *	Write a PHY register on the SGMII.
   12278  * This could be handled by the PHY layer if we didn't have to lock the
   12279  * resource ...
   12280  */
   12281 static int
   12282 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12283 {
   12284 	struct wm_softc *sc = device_private(dev);
   12285 	int rv;
   12286 
   12287 	if (sc->phy.acquire(sc) != 0) {
   12288 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12289 		return -1;
   12290 	}
   12291 
   12292 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12293 
   12294 	sc->phy.release(sc);
   12295 
   12296 	return rv;
   12297 }
   12298 
   12299 static int
   12300 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12301 {
   12302 	struct wm_softc *sc = device_private(dev);
   12303 	uint32_t i2ccmd;
   12304 	uint16_t swapdata;
   12305 	int rv = 0;
   12306 	int i;
   12307 
   12308 	/* Swap the data bytes for the I2C interface */
   12309 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12310 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12311 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12312 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12313 
   12314 	/* Poll the ready bit */
   12315 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12316 		delay(50);
   12317 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12318 		if (i2ccmd & I2CCMD_READY)
   12319 			break;
   12320 	}
   12321 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12322 		device_printf(dev, "I2CCMD Write did not complete\n");
   12323 		rv = ETIMEDOUT;
   12324 	}
   12325 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12326 		device_printf(dev, "I2CCMD Error bit set\n");
   12327 		rv = EIO;
   12328 	}
   12329 
   12330 	return rv;
   12331 }
   12332 
   12333 /* TBI related */
   12334 
   12335 static bool
   12336 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12337 {
   12338 	bool sig;
   12339 
   12340 	sig = ctrl & CTRL_SWDPIN(1);
   12341 
   12342 	/*
   12343 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12344 	 * detect a signal, 1 if they don't.
   12345 	 */
   12346 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12347 		sig = !sig;
   12348 
   12349 	return sig;
   12350 }
   12351 
   12352 /*
   12353  * wm_tbi_mediainit:
   12354  *
   12355  *	Initialize media for use on 1000BASE-X devices.
   12356  */
   12357 static void
   12358 wm_tbi_mediainit(struct wm_softc *sc)
   12359 {
   12360 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12361 	const char *sep = "";
   12362 
   12363 	if (sc->sc_type < WM_T_82543)
   12364 		sc->sc_tipg = TIPG_WM_DFLT;
   12365 	else
   12366 		sc->sc_tipg = TIPG_LG_DFLT;
   12367 
   12368 	sc->sc_tbi_serdes_anegticks = 5;
   12369 
   12370 	/* Initialize our media structures */
   12371 	sc->sc_mii.mii_ifp = ifp;
   12372 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12373 
   12374 	ifp->if_baudrate = IF_Gbps(1);
   12375 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12376 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12377 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12378 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12379 		    sc->sc_core_lock);
   12380 	} else {
   12381 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12382 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12383 	}
   12384 
   12385 	/*
   12386 	 * SWD Pins:
   12387 	 *
   12388 	 *	0 = Link LED (output)
   12389 	 *	1 = Loss Of Signal (input)
   12390 	 */
   12391 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12392 
   12393 	/* XXX Perhaps this is only for TBI */
   12394 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12395 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12396 
   12397 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12398 		sc->sc_ctrl &= ~CTRL_LRST;
   12399 
   12400 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12401 
   12402 #define	ADD(ss, mm, dd)							\
   12403 do {									\
   12404 	aprint_normal("%s%s", sep, ss);					\
   12405 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12406 	sep = ", ";							\
   12407 } while (/*CONSTCOND*/0)
   12408 
   12409 	aprint_normal_dev(sc->sc_dev, "");
   12410 
   12411 	if (sc->sc_type == WM_T_I354) {
   12412 		uint32_t status;
   12413 
   12414 		status = CSR_READ(sc, WMREG_STATUS);
   12415 		if (((status & STATUS_2P5_SKU) != 0)
   12416 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12417 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12418 		} else
   12419 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12420 	} else if (sc->sc_type == WM_T_82545) {
   12421 		/* Only 82545 is LX (XXX except SFP) */
   12422 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12423 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12424 	} else if (sc->sc_sfptype != 0) {
   12425 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12426 		switch (sc->sc_sfptype) {
   12427 		default:
   12428 		case SFF_SFP_ETH_FLAGS_1000SX:
   12429 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12430 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12431 			break;
   12432 		case SFF_SFP_ETH_FLAGS_1000LX:
   12433 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12434 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12435 			break;
   12436 		case SFF_SFP_ETH_FLAGS_1000CX:
   12437 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12438 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12439 			break;
   12440 		case SFF_SFP_ETH_FLAGS_1000T:
   12441 			ADD("1000baseT", IFM_1000_T, 0);
   12442 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12443 			break;
   12444 		case SFF_SFP_ETH_FLAGS_100FX:
   12445 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12446 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12447 			break;
   12448 		}
   12449 	} else {
   12450 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12451 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12452 	}
   12453 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12454 	aprint_normal("\n");
   12455 
   12456 #undef ADD
   12457 
   12458 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12459 }
   12460 
   12461 /*
   12462  * wm_tbi_mediachange:	[ifmedia interface function]
   12463  *
   12464  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12465  */
   12466 static int
   12467 wm_tbi_mediachange(struct ifnet *ifp)
   12468 {
   12469 	struct wm_softc *sc = ifp->if_softc;
   12470 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12471 	uint32_t status, ctrl;
   12472 	bool signal;
   12473 	int i;
   12474 
   12475 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12476 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12477 		/* XXX need some work for >= 82571 and < 82575 */
   12478 		if (sc->sc_type < WM_T_82575)
   12479 			return 0;
   12480 	}
   12481 
   12482 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12483 	    || (sc->sc_type >= WM_T_82575))
   12484 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12485 
   12486 	sc->sc_ctrl &= ~CTRL_LRST;
   12487 	sc->sc_txcw = TXCW_ANE;
   12488 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12489 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12490 	else if (ife->ifm_media & IFM_FDX)
   12491 		sc->sc_txcw |= TXCW_FD;
   12492 	else
   12493 		sc->sc_txcw |= TXCW_HD;
   12494 
   12495 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12496 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12497 
   12498 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12499 		device_xname(sc->sc_dev), sc->sc_txcw));
   12500 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12501 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12502 	CSR_WRITE_FLUSH(sc);
   12503 	delay(1000);
   12504 
   12505 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12506 	signal = wm_tbi_havesignal(sc, ctrl);
   12507 
   12508 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12509 		signal));
   12510 
   12511 	if (signal) {
   12512 		/* Have signal; wait for the link to come up. */
   12513 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12514 			delay(10000);
   12515 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12516 				break;
   12517 		}
   12518 
   12519 		DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12520 			device_xname(sc->sc_dev), i));
   12521 
   12522 		status = CSR_READ(sc, WMREG_STATUS);
   12523 		DPRINTF(sc, WM_DEBUG_LINK,
   12524 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12525 			device_xname(sc->sc_dev), status, STATUS_LU));
   12526 		if (status & STATUS_LU) {
   12527 			/* Link is up. */
   12528 			DPRINTF(sc, WM_DEBUG_LINK,
   12529 			    ("%s: LINK: set media -> link up %s\n",
   12530 				device_xname(sc->sc_dev),
   12531 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12532 
   12533 			/*
   12534 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12535 			 * so we should update sc->sc_ctrl
   12536 			 */
   12537 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12538 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12539 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12540 			if (status & STATUS_FD)
   12541 				sc->sc_tctl |=
   12542 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12543 			else
   12544 				sc->sc_tctl |=
   12545 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12546 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12547 				sc->sc_fcrtl |= FCRTL_XONE;
   12548 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12549 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12550 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12551 			sc->sc_tbi_linkup = 1;
   12552 		} else {
   12553 			if (i == WM_LINKUP_TIMEOUT)
   12554 				wm_check_for_link(sc);
   12555 			/* Link is down. */
   12556 			DPRINTF(sc, WM_DEBUG_LINK,
   12557 			    ("%s: LINK: set media -> link down\n",
   12558 				device_xname(sc->sc_dev)));
   12559 			sc->sc_tbi_linkup = 0;
   12560 		}
   12561 	} else {
   12562 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12563 			device_xname(sc->sc_dev)));
   12564 		sc->sc_tbi_linkup = 0;
   12565 	}
   12566 
   12567 	wm_tbi_serdes_set_linkled(sc);
   12568 
   12569 	return 0;
   12570 }
   12571 
   12572 /*
   12573  * wm_tbi_mediastatus:	[ifmedia interface function]
   12574  *
   12575  *	Get the current interface media status on a 1000BASE-X device.
   12576  */
   12577 static void
   12578 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12579 {
   12580 	struct wm_softc *sc = ifp->if_softc;
   12581 	uint32_t ctrl, status;
   12582 
   12583 	ifmr->ifm_status = IFM_AVALID;
   12584 	ifmr->ifm_active = IFM_ETHER;
   12585 
   12586 	status = CSR_READ(sc, WMREG_STATUS);
   12587 	if ((status & STATUS_LU) == 0) {
   12588 		ifmr->ifm_active |= IFM_NONE;
   12589 		return;
   12590 	}
   12591 
   12592 	ifmr->ifm_status |= IFM_ACTIVE;
   12593 	/* Only 82545 is LX */
   12594 	if (sc->sc_type == WM_T_82545)
   12595 		ifmr->ifm_active |= IFM_1000_LX;
   12596 	else
   12597 		ifmr->ifm_active |= IFM_1000_SX;
   12598 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12599 		ifmr->ifm_active |= IFM_FDX;
   12600 	else
   12601 		ifmr->ifm_active |= IFM_HDX;
   12602 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12603 	if (ctrl & CTRL_RFCE)
   12604 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12605 	if (ctrl & CTRL_TFCE)
   12606 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12607 }
   12608 
   12609 /* XXX TBI only */
   12610 static int
   12611 wm_check_for_link(struct wm_softc *sc)
   12612 {
   12613 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12614 	uint32_t rxcw;
   12615 	uint32_t ctrl;
   12616 	uint32_t status;
   12617 	bool signal;
   12618 
   12619 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12620 		device_xname(sc->sc_dev), __func__));
   12621 
   12622 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12623 		/* XXX need some work for >= 82571 */
   12624 		if (sc->sc_type >= WM_T_82571) {
   12625 			sc->sc_tbi_linkup = 1;
   12626 			return 0;
   12627 		}
   12628 	}
   12629 
   12630 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12631 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12632 	status = CSR_READ(sc, WMREG_STATUS);
   12633 	signal = wm_tbi_havesignal(sc, ctrl);
   12634 
   12635 	DPRINTF(sc, WM_DEBUG_LINK,
   12636 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12637 		device_xname(sc->sc_dev), __func__, signal,
   12638 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12639 
   12640 	/*
   12641 	 * SWDPIN   LU RXCW
   12642 	 *	0    0	  0
   12643 	 *	0    0	  1	(should not happen)
   12644 	 *	0    1	  0	(should not happen)
   12645 	 *	0    1	  1	(should not happen)
   12646 	 *	1    0	  0	Disable autonego and force linkup
   12647 	 *	1    0	  1	got /C/ but not linkup yet
   12648 	 *	1    1	  0	(linkup)
   12649 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12650 	 *
   12651 	 */
   12652 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12653 		DPRINTF(sc, WM_DEBUG_LINK,
   12654 		    ("%s: %s: force linkup and fullduplex\n",
   12655 			device_xname(sc->sc_dev), __func__));
   12656 		sc->sc_tbi_linkup = 0;
   12657 		/* Disable auto-negotiation in the TXCW register */
   12658 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12659 
   12660 		/*
   12661 		 * Force link-up and also force full-duplex.
   12662 		 *
   12663 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   12664 		 * so we should update sc->sc_ctrl
   12665 		 */
   12666 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12667 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12668 	} else if (((status & STATUS_LU) != 0)
   12669 	    && ((rxcw & RXCW_C) != 0)
   12670 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12671 		sc->sc_tbi_linkup = 1;
   12672 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12673 			device_xname(sc->sc_dev),
   12674 			__func__));
   12675 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12676 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12677 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12678 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   12679 			device_xname(sc->sc_dev), __func__));
   12680 	} else {
   12681 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12682 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12683 			status));
   12684 	}
   12685 
   12686 	return 0;
   12687 }
   12688 
   12689 /*
   12690  * wm_tbi_tick:
   12691  *
   12692  *	Check the link on TBI devices.
   12693  *	This function acts as mii_tick().
   12694  */
   12695 static void
   12696 wm_tbi_tick(struct wm_softc *sc)
   12697 {
   12698 	struct mii_data *mii = &sc->sc_mii;
   12699 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12700 	uint32_t status;
   12701 
   12702 	KASSERT(WM_CORE_LOCKED(sc));
   12703 
   12704 	status = CSR_READ(sc, WMREG_STATUS);
   12705 
   12706 	/* XXX is this needed? */
   12707 	(void)CSR_READ(sc, WMREG_RXCW);
   12708 	(void)CSR_READ(sc, WMREG_CTRL);
   12709 
   12710 	/* set link status */
   12711 	if ((status & STATUS_LU) == 0) {
   12712 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12713 			device_xname(sc->sc_dev)));
   12714 		sc->sc_tbi_linkup = 0;
   12715 	} else if (sc->sc_tbi_linkup == 0) {
   12716 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12717 			device_xname(sc->sc_dev),
   12718 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12719 		sc->sc_tbi_linkup = 1;
   12720 		sc->sc_tbi_serdes_ticks = 0;
   12721 	}
   12722 
   12723 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12724 		goto setled;
   12725 
   12726 	if ((status & STATUS_LU) == 0) {
   12727 		sc->sc_tbi_linkup = 0;
   12728 		/* If the timer expired, retry autonegotiation */
   12729 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12730 		    && (++sc->sc_tbi_serdes_ticks
   12731 			>= sc->sc_tbi_serdes_anegticks)) {
   12732 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12733 				device_xname(sc->sc_dev), __func__));
   12734 			sc->sc_tbi_serdes_ticks = 0;
   12735 			/*
   12736 			 * Reset the link, and let autonegotiation do
   12737 			 * its thing
   12738 			 */
   12739 			sc->sc_ctrl |= CTRL_LRST;
   12740 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12741 			CSR_WRITE_FLUSH(sc);
   12742 			delay(1000);
   12743 			sc->sc_ctrl &= ~CTRL_LRST;
   12744 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12745 			CSR_WRITE_FLUSH(sc);
   12746 			delay(1000);
   12747 			CSR_WRITE(sc, WMREG_TXCW,
   12748 			    sc->sc_txcw & ~TXCW_ANE);
   12749 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12750 		}
   12751 	}
   12752 
   12753 setled:
   12754 	wm_tbi_serdes_set_linkled(sc);
   12755 }
   12756 
   12757 /* SERDES related */
   12758 static void
   12759 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12760 {
   12761 	uint32_t reg;
   12762 
   12763 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12764 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12765 		return;
   12766 
   12767 	/* Enable PCS to turn on link */
   12768 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12769 	reg |= PCS_CFG_PCS_EN;
   12770 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12771 
   12772 	/* Power up the laser */
   12773 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12774 	reg &= ~CTRL_EXT_SWDPIN(3);
   12775 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12776 
   12777 	/* Flush the write to verify completion */
   12778 	CSR_WRITE_FLUSH(sc);
   12779 	delay(1000);
   12780 }
   12781 
   12782 static int
   12783 wm_serdes_mediachange(struct ifnet *ifp)
   12784 {
   12785 	struct wm_softc *sc = ifp->if_softc;
   12786 	bool pcs_autoneg = true; /* XXX */
   12787 	uint32_t ctrl_ext, pcs_lctl, reg;
   12788 
   12789 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12790 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12791 		return 0;
   12792 
   12793 	/* XXX Currently, this function is not called on 8257[12] */
   12794 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12795 	    || (sc->sc_type >= WM_T_82575))
   12796 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12797 
   12798 	/* Power on the sfp cage if present */
   12799 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12800 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12801 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12802 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12803 
   12804 	sc->sc_ctrl |= CTRL_SLU;
   12805 
   12806 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12807 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12808 
   12809 		reg = CSR_READ(sc, WMREG_CONNSW);
   12810 		reg |= CONNSW_ENRGSRC;
   12811 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12812 	}
   12813 
   12814 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12815 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12816 	case CTRL_EXT_LINK_MODE_SGMII:
   12817 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12818 		pcs_autoneg = true;
   12819 		/* Autoneg time out should be disabled for SGMII mode */
   12820 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12821 		break;
   12822 	case CTRL_EXT_LINK_MODE_1000KX:
   12823 		pcs_autoneg = false;
   12824 		/* FALLTHROUGH */
   12825 	default:
   12826 		if ((sc->sc_type == WM_T_82575)
   12827 		    || (sc->sc_type == WM_T_82576)) {
   12828 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12829 				pcs_autoneg = false;
   12830 		}
   12831 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12832 		    | CTRL_FRCFDX;
   12833 
   12834 		/* Set speed of 1000/Full if speed/duplex is forced */
   12835 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12836 	}
   12837 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12838 
   12839 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12840 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12841 
   12842 	if (pcs_autoneg) {
   12843 		/* Set PCS register for autoneg */
   12844 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12845 
   12846 		/* Disable force flow control for autoneg */
   12847 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12848 
   12849 		/* Configure flow control advertisement for autoneg */
   12850 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12851 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12852 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12853 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12854 	} else
   12855 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12856 
   12857 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12858 
   12859 	return 0;
   12860 }
   12861 
   12862 static void
   12863 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12864 {
   12865 	struct wm_softc *sc = ifp->if_softc;
   12866 	struct mii_data *mii = &sc->sc_mii;
   12867 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12868 	uint32_t pcs_adv, pcs_lpab, reg;
   12869 
   12870 	ifmr->ifm_status = IFM_AVALID;
   12871 	ifmr->ifm_active = IFM_ETHER;
   12872 
   12873 	/* Check PCS */
   12874 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12875 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12876 		ifmr->ifm_active |= IFM_NONE;
   12877 		sc->sc_tbi_linkup = 0;
   12878 		goto setled;
   12879 	}
   12880 
   12881 	sc->sc_tbi_linkup = 1;
   12882 	ifmr->ifm_status |= IFM_ACTIVE;
   12883 	if (sc->sc_type == WM_T_I354) {
   12884 		uint32_t status;
   12885 
   12886 		status = CSR_READ(sc, WMREG_STATUS);
   12887 		if (((status & STATUS_2P5_SKU) != 0)
   12888 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12889 			ifmr->ifm_active |= IFM_2500_KX;
   12890 		} else
   12891 			ifmr->ifm_active |= IFM_1000_KX;
   12892 	} else {
   12893 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12894 		case PCS_LSTS_SPEED_10:
   12895 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12896 			break;
   12897 		case PCS_LSTS_SPEED_100:
   12898 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12899 			break;
   12900 		case PCS_LSTS_SPEED_1000:
   12901 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12902 			break;
   12903 		default:
   12904 			device_printf(sc->sc_dev, "Unknown speed\n");
   12905 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12906 			break;
   12907 		}
   12908 	}
   12909 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12910 	if ((reg & PCS_LSTS_FDX) != 0)
   12911 		ifmr->ifm_active |= IFM_FDX;
   12912 	else
   12913 		ifmr->ifm_active |= IFM_HDX;
   12914 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12915 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12916 		/* Check flow */
   12917 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12918 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12919 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12920 			goto setled;
   12921 		}
   12922 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12923 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12924 		DPRINTF(sc, WM_DEBUG_LINK,
   12925 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12926 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12927 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12928 			mii->mii_media_active |= IFM_FLOW
   12929 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12930 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12931 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12932 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12933 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12934 			mii->mii_media_active |= IFM_FLOW
   12935 			    | IFM_ETH_TXPAUSE;
   12936 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12937 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12938 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12939 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12940 			mii->mii_media_active |= IFM_FLOW
   12941 			    | IFM_ETH_RXPAUSE;
   12942 		}
   12943 	}
   12944 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12945 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12946 setled:
   12947 	wm_tbi_serdes_set_linkled(sc);
   12948 }
   12949 
   12950 /*
   12951  * wm_serdes_tick:
   12952  *
   12953  *	Check the link on serdes devices.
   12954  */
   12955 static void
   12956 wm_serdes_tick(struct wm_softc *sc)
   12957 {
   12958 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12959 	struct mii_data *mii = &sc->sc_mii;
   12960 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12961 	uint32_t reg;
   12962 
   12963 	KASSERT(WM_CORE_LOCKED(sc));
   12964 
   12965 	mii->mii_media_status = IFM_AVALID;
   12966 	mii->mii_media_active = IFM_ETHER;
   12967 
   12968 	/* Check PCS */
   12969 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12970 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12971 		mii->mii_media_status |= IFM_ACTIVE;
   12972 		sc->sc_tbi_linkup = 1;
   12973 		sc->sc_tbi_serdes_ticks = 0;
   12974 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12975 		if ((reg & PCS_LSTS_FDX) != 0)
   12976 			mii->mii_media_active |= IFM_FDX;
   12977 		else
   12978 			mii->mii_media_active |= IFM_HDX;
   12979 	} else {
   12980 		mii->mii_media_status |= IFM_NONE;
   12981 		sc->sc_tbi_linkup = 0;
   12982 		/* If the timer expired, retry autonegotiation */
   12983 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12984 		    && (++sc->sc_tbi_serdes_ticks
   12985 			>= sc->sc_tbi_serdes_anegticks)) {
   12986 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12987 				device_xname(sc->sc_dev), __func__));
   12988 			sc->sc_tbi_serdes_ticks = 0;
   12989 			/* XXX */
   12990 			wm_serdes_mediachange(ifp);
   12991 		}
   12992 	}
   12993 
   12994 	wm_tbi_serdes_set_linkled(sc);
   12995 }
   12996 
   12997 /* SFP related */
   12998 
   12999 static int
   13000 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13001 {
   13002 	uint32_t i2ccmd;
   13003 	int i;
   13004 
   13005 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13006 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13007 
   13008 	/* Poll the ready bit */
   13009 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13010 		delay(50);
   13011 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13012 		if (i2ccmd & I2CCMD_READY)
   13013 			break;
   13014 	}
   13015 	if ((i2ccmd & I2CCMD_READY) == 0)
   13016 		return -1;
   13017 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13018 		return -1;
   13019 
   13020 	*data = i2ccmd & 0x00ff;
   13021 
   13022 	return 0;
   13023 }
   13024 
   13025 static uint32_t
   13026 wm_sfp_get_media_type(struct wm_softc *sc)
   13027 {
   13028 	uint32_t ctrl_ext;
   13029 	uint8_t val = 0;
   13030 	int timeout = 3;
   13031 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13032 	int rv = -1;
   13033 
   13034 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13035 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13036 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13037 	CSR_WRITE_FLUSH(sc);
   13038 
   13039 	/* Read SFP module data */
   13040 	while (timeout) {
   13041 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13042 		if (rv == 0)
   13043 			break;
   13044 		delay(100*1000); /* XXX too big */
   13045 		timeout--;
   13046 	}
   13047 	if (rv != 0)
   13048 		goto out;
   13049 
   13050 	switch (val) {
   13051 	case SFF_SFP_ID_SFF:
   13052 		aprint_normal_dev(sc->sc_dev,
   13053 		    "Module/Connector soldered to board\n");
   13054 		break;
   13055 	case SFF_SFP_ID_SFP:
   13056 		sc->sc_flags |= WM_F_SFP;
   13057 		break;
   13058 	case SFF_SFP_ID_UNKNOWN:
   13059 		goto out;
   13060 	default:
   13061 		break;
   13062 	}
   13063 
   13064 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13065 	if (rv != 0)
   13066 		goto out;
   13067 
   13068 	sc->sc_sfptype = val;
   13069 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13070 		mediatype = WM_MEDIATYPE_SERDES;
   13071 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13072 		sc->sc_flags |= WM_F_SGMII;
   13073 		mediatype = WM_MEDIATYPE_COPPER;
   13074 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13075 		sc->sc_flags |= WM_F_SGMII;
   13076 		mediatype = WM_MEDIATYPE_SERDES;
   13077 	} else {
   13078 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13079 		    __func__, sc->sc_sfptype);
   13080 		sc->sc_sfptype = 0; /* XXX unknown */
   13081 	}
   13082 
   13083 out:
   13084 	/* Restore I2C interface setting */
   13085 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13086 
   13087 	return mediatype;
   13088 }
   13089 
   13090 /*
   13091  * NVM related.
   13092  * Microwire, SPI (w/wo EERD) and Flash.
   13093  */
   13094 
   13095 /* Both spi and uwire */
   13096 
   13097 /*
   13098  * wm_eeprom_sendbits:
   13099  *
   13100  *	Send a series of bits to the EEPROM.
   13101  */
   13102 static void
   13103 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13104 {
   13105 	uint32_t reg;
   13106 	int x;
   13107 
   13108 	reg = CSR_READ(sc, WMREG_EECD);
   13109 
   13110 	for (x = nbits; x > 0; x--) {
   13111 		if (bits & (1U << (x - 1)))
   13112 			reg |= EECD_DI;
   13113 		else
   13114 			reg &= ~EECD_DI;
   13115 		CSR_WRITE(sc, WMREG_EECD, reg);
   13116 		CSR_WRITE_FLUSH(sc);
   13117 		delay(2);
   13118 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13119 		CSR_WRITE_FLUSH(sc);
   13120 		delay(2);
   13121 		CSR_WRITE(sc, WMREG_EECD, reg);
   13122 		CSR_WRITE_FLUSH(sc);
   13123 		delay(2);
   13124 	}
   13125 }
   13126 
   13127 /*
   13128  * wm_eeprom_recvbits:
   13129  *
   13130  *	Receive a series of bits from the EEPROM.
   13131  */
   13132 static void
   13133 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13134 {
   13135 	uint32_t reg, val;
   13136 	int x;
   13137 
   13138 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13139 
   13140 	val = 0;
   13141 	for (x = nbits; x > 0; x--) {
   13142 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13143 		CSR_WRITE_FLUSH(sc);
   13144 		delay(2);
   13145 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13146 			val |= (1U << (x - 1));
   13147 		CSR_WRITE(sc, WMREG_EECD, reg);
   13148 		CSR_WRITE_FLUSH(sc);
   13149 		delay(2);
   13150 	}
   13151 	*valp = val;
   13152 }
   13153 
   13154 /* Microwire */
   13155 
   13156 /*
   13157  * wm_nvm_read_uwire:
   13158  *
   13159  *	Read a word from the EEPROM using the MicroWire protocol.
   13160  */
   13161 static int
   13162 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13163 {
   13164 	uint32_t reg, val;
   13165 	int i;
   13166 
   13167 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13168 		device_xname(sc->sc_dev), __func__));
   13169 
   13170 	if (sc->nvm.acquire(sc) != 0)
   13171 		return -1;
   13172 
   13173 	for (i = 0; i < wordcnt; i++) {
   13174 		/* Clear SK and DI. */
   13175 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13176 		CSR_WRITE(sc, WMREG_EECD, reg);
   13177 
   13178 		/*
   13179 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13180 		 * and Xen.
   13181 		 *
   13182 		 * We use this workaround only for 82540 because qemu's
   13183 		 * e1000 act as 82540.
   13184 		 */
   13185 		if (sc->sc_type == WM_T_82540) {
   13186 			reg |= EECD_SK;
   13187 			CSR_WRITE(sc, WMREG_EECD, reg);
   13188 			reg &= ~EECD_SK;
   13189 			CSR_WRITE(sc, WMREG_EECD, reg);
   13190 			CSR_WRITE_FLUSH(sc);
   13191 			delay(2);
   13192 		}
   13193 		/* XXX: end of workaround */
   13194 
   13195 		/* Set CHIP SELECT. */
   13196 		reg |= EECD_CS;
   13197 		CSR_WRITE(sc, WMREG_EECD, reg);
   13198 		CSR_WRITE_FLUSH(sc);
   13199 		delay(2);
   13200 
   13201 		/* Shift in the READ command. */
   13202 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13203 
   13204 		/* Shift in address. */
   13205 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13206 
   13207 		/* Shift out the data. */
   13208 		wm_eeprom_recvbits(sc, &val, 16);
   13209 		data[i] = val & 0xffff;
   13210 
   13211 		/* Clear CHIP SELECT. */
   13212 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13213 		CSR_WRITE(sc, WMREG_EECD, reg);
   13214 		CSR_WRITE_FLUSH(sc);
   13215 		delay(2);
   13216 	}
   13217 
   13218 	sc->nvm.release(sc);
   13219 	return 0;
   13220 }
   13221 
   13222 /* SPI */
   13223 
   13224 /*
   13225  * Set SPI and FLASH related information from the EECD register.
   13226  * For 82541 and 82547, the word size is taken from EEPROM.
   13227  */
   13228 static int
   13229 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13230 {
   13231 	int size;
   13232 	uint32_t reg;
   13233 	uint16_t data;
   13234 
   13235 	reg = CSR_READ(sc, WMREG_EECD);
   13236 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13237 
   13238 	/* Read the size of NVM from EECD by default */
   13239 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13240 	switch (sc->sc_type) {
   13241 	case WM_T_82541:
   13242 	case WM_T_82541_2:
   13243 	case WM_T_82547:
   13244 	case WM_T_82547_2:
   13245 		/* Set dummy value to access EEPROM */
   13246 		sc->sc_nvm_wordsize = 64;
   13247 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13248 			aprint_error_dev(sc->sc_dev,
   13249 			    "%s: failed to read EEPROM size\n", __func__);
   13250 		}
   13251 		reg = data;
   13252 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13253 		if (size == 0)
   13254 			size = 6; /* 64 word size */
   13255 		else
   13256 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13257 		break;
   13258 	case WM_T_80003:
   13259 	case WM_T_82571:
   13260 	case WM_T_82572:
   13261 	case WM_T_82573: /* SPI case */
   13262 	case WM_T_82574: /* SPI case */
   13263 	case WM_T_82583: /* SPI case */
   13264 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13265 		if (size > 14)
   13266 			size = 14;
   13267 		break;
   13268 	case WM_T_82575:
   13269 	case WM_T_82576:
   13270 	case WM_T_82580:
   13271 	case WM_T_I350:
   13272 	case WM_T_I354:
   13273 	case WM_T_I210:
   13274 	case WM_T_I211:
   13275 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13276 		if (size > 15)
   13277 			size = 15;
   13278 		break;
   13279 	default:
   13280 		aprint_error_dev(sc->sc_dev,
   13281 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13282 		return -1;
   13283 		break;
   13284 	}
   13285 
   13286 	sc->sc_nvm_wordsize = 1 << size;
   13287 
   13288 	return 0;
   13289 }
   13290 
   13291 /*
   13292  * wm_nvm_ready_spi:
   13293  *
   13294  *	Wait for a SPI EEPROM to be ready for commands.
   13295  */
   13296 static int
   13297 wm_nvm_ready_spi(struct wm_softc *sc)
   13298 {
   13299 	uint32_t val;
   13300 	int usec;
   13301 
   13302 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13303 		device_xname(sc->sc_dev), __func__));
   13304 
   13305 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13306 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13307 		wm_eeprom_recvbits(sc, &val, 8);
   13308 		if ((val & SPI_SR_RDY) == 0)
   13309 			break;
   13310 	}
   13311 	if (usec >= SPI_MAX_RETRIES) {
   13312 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   13313 		return -1;
   13314 	}
   13315 	return 0;
   13316 }
   13317 
   13318 /*
   13319  * wm_nvm_read_spi:
   13320  *
   13321  *	Read a work from the EEPROM using the SPI protocol.
   13322  */
   13323 static int
   13324 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13325 {
   13326 	uint32_t reg, val;
   13327 	int i;
   13328 	uint8_t opc;
   13329 	int rv = 0;
   13330 
   13331 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13332 		device_xname(sc->sc_dev), __func__));
   13333 
   13334 	if (sc->nvm.acquire(sc) != 0)
   13335 		return -1;
   13336 
   13337 	/* Clear SK and CS. */
   13338 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13339 	CSR_WRITE(sc, WMREG_EECD, reg);
   13340 	CSR_WRITE_FLUSH(sc);
   13341 	delay(2);
   13342 
   13343 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13344 		goto out;
   13345 
   13346 	/* Toggle CS to flush commands. */
   13347 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13348 	CSR_WRITE_FLUSH(sc);
   13349 	delay(2);
   13350 	CSR_WRITE(sc, WMREG_EECD, reg);
   13351 	CSR_WRITE_FLUSH(sc);
   13352 	delay(2);
   13353 
   13354 	opc = SPI_OPC_READ;
   13355 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13356 		opc |= SPI_OPC_A8;
   13357 
   13358 	wm_eeprom_sendbits(sc, opc, 8);
   13359 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13360 
   13361 	for (i = 0; i < wordcnt; i++) {
   13362 		wm_eeprom_recvbits(sc, &val, 16);
   13363 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13364 	}
   13365 
   13366 	/* Raise CS and clear SK. */
   13367 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13368 	CSR_WRITE(sc, WMREG_EECD, reg);
   13369 	CSR_WRITE_FLUSH(sc);
   13370 	delay(2);
   13371 
   13372 out:
   13373 	sc->nvm.release(sc);
   13374 	return rv;
   13375 }
   13376 
   13377 /* Using with EERD */
   13378 
   13379 static int
   13380 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13381 {
   13382 	uint32_t attempts = 100000;
   13383 	uint32_t i, reg = 0;
   13384 	int32_t done = -1;
   13385 
   13386 	for (i = 0; i < attempts; i++) {
   13387 		reg = CSR_READ(sc, rw);
   13388 
   13389 		if (reg & EERD_DONE) {
   13390 			done = 0;
   13391 			break;
   13392 		}
   13393 		delay(5);
   13394 	}
   13395 
   13396 	return done;
   13397 }
   13398 
   13399 static int
   13400 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13401 {
   13402 	int i, eerd = 0;
   13403 	int rv = 0;
   13404 
   13405 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13406 		device_xname(sc->sc_dev), __func__));
   13407 
   13408 	if (sc->nvm.acquire(sc) != 0)
   13409 		return -1;
   13410 
   13411 	for (i = 0; i < wordcnt; i++) {
   13412 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13413 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13414 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13415 		if (rv != 0) {
   13416 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13417 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13418 			break;
   13419 		}
   13420 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13421 	}
   13422 
   13423 	sc->nvm.release(sc);
   13424 	return rv;
   13425 }
   13426 
   13427 /* Flash */
   13428 
   13429 static int
   13430 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13431 {
   13432 	uint32_t eecd;
   13433 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13434 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13435 	uint32_t nvm_dword = 0;
   13436 	uint8_t sig_byte = 0;
   13437 	int rv;
   13438 
   13439 	switch (sc->sc_type) {
   13440 	case WM_T_PCH_SPT:
   13441 	case WM_T_PCH_CNP:
   13442 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13443 		act_offset = ICH_NVM_SIG_WORD * 2;
   13444 
   13445 		/* Set bank to 0 in case flash read fails. */
   13446 		*bank = 0;
   13447 
   13448 		/* Check bank 0 */
   13449 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13450 		if (rv != 0)
   13451 			return rv;
   13452 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13453 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13454 			*bank = 0;
   13455 			return 0;
   13456 		}
   13457 
   13458 		/* Check bank 1 */
   13459 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13460 		    &nvm_dword);
   13461 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13462 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13463 			*bank = 1;
   13464 			return 0;
   13465 		}
   13466 		aprint_error_dev(sc->sc_dev,
   13467 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13468 		return -1;
   13469 	case WM_T_ICH8:
   13470 	case WM_T_ICH9:
   13471 		eecd = CSR_READ(sc, WMREG_EECD);
   13472 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13473 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13474 			return 0;
   13475 		}
   13476 		/* FALLTHROUGH */
   13477 	default:
   13478 		/* Default to 0 */
   13479 		*bank = 0;
   13480 
   13481 		/* Check bank 0 */
   13482 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13483 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13484 			*bank = 0;
   13485 			return 0;
   13486 		}
   13487 
   13488 		/* Check bank 1 */
   13489 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13490 		    &sig_byte);
   13491 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13492 			*bank = 1;
   13493 			return 0;
   13494 		}
   13495 	}
   13496 
   13497 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13498 		device_xname(sc->sc_dev)));
   13499 	return -1;
   13500 }
   13501 
   13502 /******************************************************************************
   13503  * This function does initial flash setup so that a new read/write/erase cycle
   13504  * can be started.
   13505  *
   13506  * sc - The pointer to the hw structure
   13507  ****************************************************************************/
   13508 static int32_t
   13509 wm_ich8_cycle_init(struct wm_softc *sc)
   13510 {
   13511 	uint16_t hsfsts;
   13512 	int32_t error = 1;
   13513 	int32_t i     = 0;
   13514 
   13515 	if (sc->sc_type >= WM_T_PCH_SPT)
   13516 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13517 	else
   13518 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13519 
   13520 	/* May be check the Flash Des Valid bit in Hw status */
   13521 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13522 		return error;
   13523 
   13524 	/* Clear FCERR in Hw status by writing 1 */
   13525 	/* Clear DAEL in Hw status by writing a 1 */
   13526 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13527 
   13528 	if (sc->sc_type >= WM_T_PCH_SPT)
   13529 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13530 	else
   13531 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13532 
   13533 	/*
   13534 	 * Either we should have a hardware SPI cycle in progress bit to check
   13535 	 * against, in order to start a new cycle or FDONE bit should be
   13536 	 * changed in the hardware so that it is 1 after hardware reset, which
   13537 	 * can then be used as an indication whether a cycle is in progress or
   13538 	 * has been completed .. we should also have some software semaphore
   13539 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13540 	 * threads access to those bits can be sequentiallized or a way so that
   13541 	 * 2 threads don't start the cycle at the same time
   13542 	 */
   13543 
   13544 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13545 		/*
   13546 		 * There is no cycle running at present, so we can start a
   13547 		 * cycle
   13548 		 */
   13549 
   13550 		/* Begin by setting Flash Cycle Done. */
   13551 		hsfsts |= HSFSTS_DONE;
   13552 		if (sc->sc_type >= WM_T_PCH_SPT)
   13553 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13554 			    hsfsts & 0xffffUL);
   13555 		else
   13556 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13557 		error = 0;
   13558 	} else {
   13559 		/*
   13560 		 * Otherwise poll for sometime so the current cycle has a
   13561 		 * chance to end before giving up.
   13562 		 */
   13563 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13564 			if (sc->sc_type >= WM_T_PCH_SPT)
   13565 				hsfsts = ICH8_FLASH_READ32(sc,
   13566 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13567 			else
   13568 				hsfsts = ICH8_FLASH_READ16(sc,
   13569 				    ICH_FLASH_HSFSTS);
   13570 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13571 				error = 0;
   13572 				break;
   13573 			}
   13574 			delay(1);
   13575 		}
   13576 		if (error == 0) {
   13577 			/*
   13578 			 * Successful in waiting for previous cycle to timeout,
   13579 			 * now set the Flash Cycle Done.
   13580 			 */
   13581 			hsfsts |= HSFSTS_DONE;
   13582 			if (sc->sc_type >= WM_T_PCH_SPT)
   13583 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13584 				    hsfsts & 0xffffUL);
   13585 			else
   13586 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13587 				    hsfsts);
   13588 		}
   13589 	}
   13590 	return error;
   13591 }
   13592 
   13593 /******************************************************************************
   13594  * This function starts a flash cycle and waits for its completion
   13595  *
   13596  * sc - The pointer to the hw structure
   13597  ****************************************************************************/
   13598 static int32_t
   13599 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13600 {
   13601 	uint16_t hsflctl;
   13602 	uint16_t hsfsts;
   13603 	int32_t error = 1;
   13604 	uint32_t i = 0;
   13605 
   13606 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13607 	if (sc->sc_type >= WM_T_PCH_SPT)
   13608 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13609 	else
   13610 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13611 	hsflctl |= HSFCTL_GO;
   13612 	if (sc->sc_type >= WM_T_PCH_SPT)
   13613 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13614 		    (uint32_t)hsflctl << 16);
   13615 	else
   13616 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13617 
   13618 	/* Wait till FDONE bit is set to 1 */
   13619 	do {
   13620 		if (sc->sc_type >= WM_T_PCH_SPT)
   13621 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13622 			    & 0xffffUL;
   13623 		else
   13624 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13625 		if (hsfsts & HSFSTS_DONE)
   13626 			break;
   13627 		delay(1);
   13628 		i++;
   13629 	} while (i < timeout);
   13630 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13631 		error = 0;
   13632 
   13633 	return error;
   13634 }
   13635 
   13636 /******************************************************************************
   13637  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13638  *
   13639  * sc - The pointer to the hw structure
   13640  * index - The index of the byte or word to read.
   13641  * size - Size of data to read, 1=byte 2=word, 4=dword
   13642  * data - Pointer to the word to store the value read.
   13643  *****************************************************************************/
   13644 static int32_t
   13645 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13646     uint32_t size, uint32_t *data)
   13647 {
   13648 	uint16_t hsfsts;
   13649 	uint16_t hsflctl;
   13650 	uint32_t flash_linear_address;
   13651 	uint32_t flash_data = 0;
   13652 	int32_t error = 1;
   13653 	int32_t count = 0;
   13654 
   13655 	if (size < 1  || size > 4 || data == 0x0 ||
   13656 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13657 		return error;
   13658 
   13659 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13660 	    sc->sc_ich8_flash_base;
   13661 
   13662 	do {
   13663 		delay(1);
   13664 		/* Steps */
   13665 		error = wm_ich8_cycle_init(sc);
   13666 		if (error)
   13667 			break;
   13668 
   13669 		if (sc->sc_type >= WM_T_PCH_SPT)
   13670 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13671 			    >> 16;
   13672 		else
   13673 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13674 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   13675 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13676 		    & HSFCTL_BCOUNT_MASK;
   13677 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13678 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13679 			/*
   13680 			 * In SPT, This register is in Lan memory space, not
   13681 			 * flash. Therefore, only 32 bit access is supported.
   13682 			 */
   13683 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13684 			    (uint32_t)hsflctl << 16);
   13685 		} else
   13686 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13687 
   13688 		/*
   13689 		 * Write the last 24 bits of index into Flash Linear address
   13690 		 * field in Flash Address
   13691 		 */
   13692 		/* TODO: TBD maybe check the index against the size of flash */
   13693 
   13694 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13695 
   13696 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13697 
   13698 		/*
   13699 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   13700 		 * the whole sequence a few more times, else read in (shift in)
   13701 		 * the Flash Data0, the order is least significant byte first
   13702 		 * msb to lsb
   13703 		 */
   13704 		if (error == 0) {
   13705 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13706 			if (size == 1)
   13707 				*data = (uint8_t)(flash_data & 0x000000FF);
   13708 			else if (size == 2)
   13709 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13710 			else if (size == 4)
   13711 				*data = (uint32_t)flash_data;
   13712 			break;
   13713 		} else {
   13714 			/*
   13715 			 * If we've gotten here, then things are probably
   13716 			 * completely hosed, but if the error condition is
   13717 			 * detected, it won't hurt to give it another try...
   13718 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13719 			 */
   13720 			if (sc->sc_type >= WM_T_PCH_SPT)
   13721 				hsfsts = ICH8_FLASH_READ32(sc,
   13722 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13723 			else
   13724 				hsfsts = ICH8_FLASH_READ16(sc,
   13725 				    ICH_FLASH_HSFSTS);
   13726 
   13727 			if (hsfsts & HSFSTS_ERR) {
   13728 				/* Repeat for some time before giving up. */
   13729 				continue;
   13730 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13731 				break;
   13732 		}
   13733 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13734 
   13735 	return error;
   13736 }
   13737 
   13738 /******************************************************************************
   13739  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13740  *
   13741  * sc - pointer to wm_hw structure
   13742  * index - The index of the byte to read.
   13743  * data - Pointer to a byte to store the value read.
   13744  *****************************************************************************/
   13745 static int32_t
   13746 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13747 {
   13748 	int32_t status;
   13749 	uint32_t word = 0;
   13750 
   13751 	status = wm_read_ich8_data(sc, index, 1, &word);
   13752 	if (status == 0)
   13753 		*data = (uint8_t)word;
   13754 	else
   13755 		*data = 0;
   13756 
   13757 	return status;
   13758 }
   13759 
   13760 /******************************************************************************
   13761  * Reads a word from the NVM using the ICH8 flash access registers.
   13762  *
   13763  * sc - pointer to wm_hw structure
   13764  * index - The starting byte index of the word to read.
   13765  * data - Pointer to a word to store the value read.
   13766  *****************************************************************************/
   13767 static int32_t
   13768 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13769 {
   13770 	int32_t status;
   13771 	uint32_t word = 0;
   13772 
   13773 	status = wm_read_ich8_data(sc, index, 2, &word);
   13774 	if (status == 0)
   13775 		*data = (uint16_t)word;
   13776 	else
   13777 		*data = 0;
   13778 
   13779 	return status;
   13780 }
   13781 
   13782 /******************************************************************************
   13783  * Reads a dword from the NVM using the ICH8 flash access registers.
   13784  *
   13785  * sc - pointer to wm_hw structure
   13786  * index - The starting byte index of the word to read.
   13787  * data - Pointer to a word to store the value read.
   13788  *****************************************************************************/
   13789 static int32_t
   13790 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13791 {
   13792 	int32_t status;
   13793 
   13794 	status = wm_read_ich8_data(sc, index, 4, data);
   13795 	return status;
   13796 }
   13797 
   13798 /******************************************************************************
   13799  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13800  * register.
   13801  *
   13802  * sc - Struct containing variables accessed by shared code
   13803  * offset - offset of word in the EEPROM to read
   13804  * data - word read from the EEPROM
   13805  * words - number of words to read
   13806  *****************************************************************************/
   13807 static int
   13808 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13809 {
   13810 	int32_t	 rv = 0;
   13811 	uint32_t flash_bank = 0;
   13812 	uint32_t act_offset = 0;
   13813 	uint32_t bank_offset = 0;
   13814 	uint16_t word = 0;
   13815 	uint16_t i = 0;
   13816 
   13817 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13818 		device_xname(sc->sc_dev), __func__));
   13819 
   13820 	if (sc->nvm.acquire(sc) != 0)
   13821 		return -1;
   13822 
   13823 	/*
   13824 	 * We need to know which is the valid flash bank.  In the event
   13825 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13826 	 * managing flash_bank. So it cannot be trusted and needs
   13827 	 * to be updated with each read.
   13828 	 */
   13829 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13830 	if (rv) {
   13831 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13832 			device_xname(sc->sc_dev)));
   13833 		flash_bank = 0;
   13834 	}
   13835 
   13836 	/*
   13837 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13838 	 * size
   13839 	 */
   13840 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13841 
   13842 	for (i = 0; i < words; i++) {
   13843 		/* The NVM part needs a byte offset, hence * 2 */
   13844 		act_offset = bank_offset + ((offset + i) * 2);
   13845 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13846 		if (rv) {
   13847 			aprint_error_dev(sc->sc_dev,
   13848 			    "%s: failed to read NVM\n", __func__);
   13849 			break;
   13850 		}
   13851 		data[i] = word;
   13852 	}
   13853 
   13854 	sc->nvm.release(sc);
   13855 	return rv;
   13856 }
   13857 
   13858 /******************************************************************************
   13859  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13860  * register.
   13861  *
   13862  * sc - Struct containing variables accessed by shared code
   13863  * offset - offset of word in the EEPROM to read
   13864  * data - word read from the EEPROM
   13865  * words - number of words to read
   13866  *****************************************************************************/
   13867 static int
   13868 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13869 {
   13870 	int32_t	 rv = 0;
   13871 	uint32_t flash_bank = 0;
   13872 	uint32_t act_offset = 0;
   13873 	uint32_t bank_offset = 0;
   13874 	uint32_t dword = 0;
   13875 	uint16_t i = 0;
   13876 
   13877 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13878 		device_xname(sc->sc_dev), __func__));
   13879 
   13880 	if (sc->nvm.acquire(sc) != 0)
   13881 		return -1;
   13882 
   13883 	/*
   13884 	 * We need to know which is the valid flash bank.  In the event
   13885 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13886 	 * managing flash_bank. So it cannot be trusted and needs
   13887 	 * to be updated with each read.
   13888 	 */
   13889 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13890 	if (rv) {
   13891 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13892 			device_xname(sc->sc_dev)));
   13893 		flash_bank = 0;
   13894 	}
   13895 
   13896 	/*
   13897 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13898 	 * size
   13899 	 */
   13900 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13901 
   13902 	for (i = 0; i < words; i++) {
   13903 		/* The NVM part needs a byte offset, hence * 2 */
   13904 		act_offset = bank_offset + ((offset + i) * 2);
   13905 		/* but we must read dword aligned, so mask ... */
   13906 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13907 		if (rv) {
   13908 			aprint_error_dev(sc->sc_dev,
   13909 			    "%s: failed to read NVM\n", __func__);
   13910 			break;
   13911 		}
   13912 		/* ... and pick out low or high word */
   13913 		if ((act_offset & 0x2) == 0)
   13914 			data[i] = (uint16_t)(dword & 0xFFFF);
   13915 		else
   13916 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13917 	}
   13918 
   13919 	sc->nvm.release(sc);
   13920 	return rv;
   13921 }
   13922 
   13923 /* iNVM */
   13924 
   13925 static int
   13926 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13927 {
   13928 	int32_t	 rv = 0;
   13929 	uint32_t invm_dword;
   13930 	uint16_t i;
   13931 	uint8_t record_type, word_address;
   13932 
   13933 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13934 		device_xname(sc->sc_dev), __func__));
   13935 
   13936 	for (i = 0; i < INVM_SIZE; i++) {
   13937 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13938 		/* Get record type */
   13939 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13940 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13941 			break;
   13942 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13943 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13944 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13945 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13946 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13947 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13948 			if (word_address == address) {
   13949 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13950 				rv = 0;
   13951 				break;
   13952 			}
   13953 		}
   13954 	}
   13955 
   13956 	return rv;
   13957 }
   13958 
   13959 static int
   13960 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13961 {
   13962 	int rv = 0;
   13963 	int i;
   13964 
   13965 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13966 		device_xname(sc->sc_dev), __func__));
   13967 
   13968 	if (sc->nvm.acquire(sc) != 0)
   13969 		return -1;
   13970 
   13971 	for (i = 0; i < words; i++) {
   13972 		switch (offset + i) {
   13973 		case NVM_OFF_MACADDR:
   13974 		case NVM_OFF_MACADDR1:
   13975 		case NVM_OFF_MACADDR2:
   13976 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13977 			if (rv != 0) {
   13978 				data[i] = 0xffff;
   13979 				rv = -1;
   13980 			}
   13981 			break;
   13982 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13983 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13984 			if (rv != 0) {
   13985 				*data = INVM_DEFAULT_AL;
   13986 				rv = 0;
   13987 			}
   13988 			break;
   13989 		case NVM_OFF_CFG2:
   13990 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13991 			if (rv != 0) {
   13992 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13993 				rv = 0;
   13994 			}
   13995 			break;
   13996 		case NVM_OFF_CFG4:
   13997 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13998 			if (rv != 0) {
   13999 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14000 				rv = 0;
   14001 			}
   14002 			break;
   14003 		case NVM_OFF_LED_1_CFG:
   14004 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14005 			if (rv != 0) {
   14006 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14007 				rv = 0;
   14008 			}
   14009 			break;
   14010 		case NVM_OFF_LED_0_2_CFG:
   14011 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14012 			if (rv != 0) {
   14013 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14014 				rv = 0;
   14015 			}
   14016 			break;
   14017 		case NVM_OFF_ID_LED_SETTINGS:
   14018 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14019 			if (rv != 0) {
   14020 				*data = ID_LED_RESERVED_FFFF;
   14021 				rv = 0;
   14022 			}
   14023 			break;
   14024 		default:
   14025 			DPRINTF(sc, WM_DEBUG_NVM,
   14026 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14027 			*data = NVM_RESERVED_WORD;
   14028 			break;
   14029 		}
   14030 	}
   14031 
   14032 	sc->nvm.release(sc);
   14033 	return rv;
   14034 }
   14035 
   14036 /* Lock, detecting NVM type, validate checksum, version and read */
   14037 
   14038 static int
   14039 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14040 {
   14041 	uint32_t eecd = 0;
   14042 
   14043 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14044 	    || sc->sc_type == WM_T_82583) {
   14045 		eecd = CSR_READ(sc, WMREG_EECD);
   14046 
   14047 		/* Isolate bits 15 & 16 */
   14048 		eecd = ((eecd >> 15) & 0x03);
   14049 
   14050 		/* If both bits are set, device is Flash type */
   14051 		if (eecd == 0x03)
   14052 			return 0;
   14053 	}
   14054 	return 1;
   14055 }
   14056 
   14057 static int
   14058 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14059 {
   14060 	uint32_t eec;
   14061 
   14062 	eec = CSR_READ(sc, WMREG_EEC);
   14063 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14064 		return 1;
   14065 
   14066 	return 0;
   14067 }
   14068 
   14069 /*
   14070  * wm_nvm_validate_checksum
   14071  *
   14072  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14073  */
   14074 static int
   14075 wm_nvm_validate_checksum(struct wm_softc *sc)
   14076 {
   14077 	uint16_t checksum;
   14078 	uint16_t eeprom_data;
   14079 #ifdef WM_DEBUG
   14080 	uint16_t csum_wordaddr, valid_checksum;
   14081 #endif
   14082 	int i;
   14083 
   14084 	checksum = 0;
   14085 
   14086 	/* Don't check for I211 */
   14087 	if (sc->sc_type == WM_T_I211)
   14088 		return 0;
   14089 
   14090 #ifdef WM_DEBUG
   14091 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14092 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14093 		csum_wordaddr = NVM_OFF_COMPAT;
   14094 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14095 	} else {
   14096 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14097 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14098 	}
   14099 
   14100 	/* Dump EEPROM image for debug */
   14101 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14102 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14103 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14104 		/* XXX PCH_SPT? */
   14105 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14106 		if ((eeprom_data & valid_checksum) == 0)
   14107 			DPRINTF(sc, WM_DEBUG_NVM,
   14108 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14109 				device_xname(sc->sc_dev), eeprom_data,
   14110 				    valid_checksum));
   14111 	}
   14112 
   14113 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14114 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14115 		for (i = 0; i < NVM_SIZE; i++) {
   14116 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14117 				printf("XXXX ");
   14118 			else
   14119 				printf("%04hx ", eeprom_data);
   14120 			if (i % 8 == 7)
   14121 				printf("\n");
   14122 		}
   14123 	}
   14124 
   14125 #endif /* WM_DEBUG */
   14126 
   14127 	for (i = 0; i < NVM_SIZE; i++) {
   14128 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14129 			return 1;
   14130 		checksum += eeprom_data;
   14131 	}
   14132 
   14133 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14134 #ifdef WM_DEBUG
   14135 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14136 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14137 #endif
   14138 	}
   14139 
   14140 	return 0;
   14141 }
   14142 
   14143 static void
   14144 wm_nvm_version_invm(struct wm_softc *sc)
   14145 {
   14146 	uint32_t dword;
   14147 
   14148 	/*
   14149 	 * Linux's code to decode version is very strange, so we don't
   14150 	 * obey that algorithm and just use word 61 as the document.
   14151 	 * Perhaps it's not perfect though...
   14152 	 *
   14153 	 * Example:
   14154 	 *
   14155 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14156 	 */
   14157 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14158 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14159 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14160 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14161 }
   14162 
   14163 static void
   14164 wm_nvm_version(struct wm_softc *sc)
   14165 {
   14166 	uint16_t major, minor, build, patch;
   14167 	uint16_t uid0, uid1;
   14168 	uint16_t nvm_data;
   14169 	uint16_t off;
   14170 	bool check_version = false;
   14171 	bool check_optionrom = false;
   14172 	bool have_build = false;
   14173 	bool have_uid = true;
   14174 
   14175 	/*
   14176 	 * Version format:
   14177 	 *
   14178 	 * XYYZ
   14179 	 * X0YZ
   14180 	 * X0YY
   14181 	 *
   14182 	 * Example:
   14183 	 *
   14184 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14185 	 *	82571	0x50a6	5.10.6?
   14186 	 *	82572	0x506a	5.6.10?
   14187 	 *	82572EI	0x5069	5.6.9?
   14188 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14189 	 *		0x2013	2.1.3?
   14190 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14191 	 * ICH8+82567	0x0040	0.4.0?
   14192 	 * ICH9+82566	0x1040	1.4.0?
   14193 	 *ICH10+82567	0x0043	0.4.3?
   14194 	 *  PCH+82577	0x00c1	0.12.1?
   14195 	 * PCH2+82579	0x00d3	0.13.3?
   14196 	 *		0x00d4	0.13.4?
   14197 	 *  LPT+I218	0x0023	0.2.3?
   14198 	 *  SPT+I219	0x0084	0.8.4?
   14199 	 *  CNP+I219	0x0054	0.5.4?
   14200 	 */
   14201 
   14202 	/*
   14203 	 * XXX
   14204 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14205 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   14206 	 */
   14207 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14208 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14209 		have_uid = false;
   14210 
   14211 	switch (sc->sc_type) {
   14212 	case WM_T_82571:
   14213 	case WM_T_82572:
   14214 	case WM_T_82574:
   14215 	case WM_T_82583:
   14216 		check_version = true;
   14217 		check_optionrom = true;
   14218 		have_build = true;
   14219 		break;
   14220 	case WM_T_ICH8:
   14221 	case WM_T_ICH9:
   14222 	case WM_T_ICH10:
   14223 	case WM_T_PCH:
   14224 	case WM_T_PCH2:
   14225 	case WM_T_PCH_LPT:
   14226 	case WM_T_PCH_SPT:
   14227 	case WM_T_PCH_CNP:
   14228 		check_version = true;
   14229 		have_build = true;
   14230 		have_uid = false;
   14231 		break;
   14232 	case WM_T_82575:
   14233 	case WM_T_82576:
   14234 	case WM_T_82580:
   14235 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14236 			check_version = true;
   14237 		break;
   14238 	case WM_T_I211:
   14239 		wm_nvm_version_invm(sc);
   14240 		have_uid = false;
   14241 		goto printver;
   14242 	case WM_T_I210:
   14243 		if (!wm_nvm_flash_presence_i210(sc)) {
   14244 			wm_nvm_version_invm(sc);
   14245 			have_uid = false;
   14246 			goto printver;
   14247 		}
   14248 		/* FALLTHROUGH */
   14249 	case WM_T_I350:
   14250 	case WM_T_I354:
   14251 		check_version = true;
   14252 		check_optionrom = true;
   14253 		break;
   14254 	default:
   14255 		return;
   14256 	}
   14257 	if (check_version
   14258 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14259 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14260 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14261 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14262 			build = nvm_data & NVM_BUILD_MASK;
   14263 			have_build = true;
   14264 		} else
   14265 			minor = nvm_data & 0x00ff;
   14266 
   14267 		/* Decimal */
   14268 		minor = (minor / 16) * 10 + (minor % 16);
   14269 		sc->sc_nvm_ver_major = major;
   14270 		sc->sc_nvm_ver_minor = minor;
   14271 
   14272 printver:
   14273 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14274 		    sc->sc_nvm_ver_minor);
   14275 		if (have_build) {
   14276 			sc->sc_nvm_ver_build = build;
   14277 			aprint_verbose(".%d", build);
   14278 		}
   14279 	}
   14280 
   14281 	/* Assume the Option ROM area is at avove NVM_SIZE */
   14282 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14283 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14284 		/* Option ROM Version */
   14285 		if ((off != 0x0000) && (off != 0xffff)) {
   14286 			int rv;
   14287 
   14288 			off += NVM_COMBO_VER_OFF;
   14289 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14290 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14291 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14292 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14293 				/* 16bits */
   14294 				major = uid0 >> 8;
   14295 				build = (uid0 << 8) | (uid1 >> 8);
   14296 				patch = uid1 & 0x00ff;
   14297 				aprint_verbose(", option ROM Version %d.%d.%d",
   14298 				    major, build, patch);
   14299 			}
   14300 		}
   14301 	}
   14302 
   14303 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14304 		aprint_verbose(", Image Unique ID %08x",
   14305 		    ((uint32_t)uid1 << 16) | uid0);
   14306 }
   14307 
   14308 /*
   14309  * wm_nvm_read:
   14310  *
   14311  *	Read data from the serial EEPROM.
   14312  */
   14313 static int
   14314 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14315 {
   14316 	int rv;
   14317 
   14318 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14319 		device_xname(sc->sc_dev), __func__));
   14320 
   14321 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14322 		return -1;
   14323 
   14324 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14325 
   14326 	return rv;
   14327 }
   14328 
   14329 /*
   14330  * Hardware semaphores.
   14331  * Very complexed...
   14332  */
   14333 
   14334 static int
   14335 wm_get_null(struct wm_softc *sc)
   14336 {
   14337 
   14338 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14339 		device_xname(sc->sc_dev), __func__));
   14340 	return 0;
   14341 }
   14342 
   14343 static void
   14344 wm_put_null(struct wm_softc *sc)
   14345 {
   14346 
   14347 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14348 		device_xname(sc->sc_dev), __func__));
   14349 	return;
   14350 }
   14351 
   14352 static int
   14353 wm_get_eecd(struct wm_softc *sc)
   14354 {
   14355 	uint32_t reg;
   14356 	int x;
   14357 
   14358 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14359 		device_xname(sc->sc_dev), __func__));
   14360 
   14361 	reg = CSR_READ(sc, WMREG_EECD);
   14362 
   14363 	/* Request EEPROM access. */
   14364 	reg |= EECD_EE_REQ;
   14365 	CSR_WRITE(sc, WMREG_EECD, reg);
   14366 
   14367 	/* ..and wait for it to be granted. */
   14368 	for (x = 0; x < 1000; x++) {
   14369 		reg = CSR_READ(sc, WMREG_EECD);
   14370 		if (reg & EECD_EE_GNT)
   14371 			break;
   14372 		delay(5);
   14373 	}
   14374 	if ((reg & EECD_EE_GNT) == 0) {
   14375 		aprint_error_dev(sc->sc_dev,
   14376 		    "could not acquire EEPROM GNT\n");
   14377 		reg &= ~EECD_EE_REQ;
   14378 		CSR_WRITE(sc, WMREG_EECD, reg);
   14379 		return -1;
   14380 	}
   14381 
   14382 	return 0;
   14383 }
   14384 
   14385 static void
   14386 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14387 {
   14388 
   14389 	*eecd |= EECD_SK;
   14390 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14391 	CSR_WRITE_FLUSH(sc);
   14392 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14393 		delay(1);
   14394 	else
   14395 		delay(50);
   14396 }
   14397 
   14398 static void
   14399 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14400 {
   14401 
   14402 	*eecd &= ~EECD_SK;
   14403 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14404 	CSR_WRITE_FLUSH(sc);
   14405 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14406 		delay(1);
   14407 	else
   14408 		delay(50);
   14409 }
   14410 
   14411 static void
   14412 wm_put_eecd(struct wm_softc *sc)
   14413 {
   14414 	uint32_t reg;
   14415 
   14416 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14417 		device_xname(sc->sc_dev), __func__));
   14418 
   14419 	/* Stop nvm */
   14420 	reg = CSR_READ(sc, WMREG_EECD);
   14421 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14422 		/* Pull CS high */
   14423 		reg |= EECD_CS;
   14424 		wm_nvm_eec_clock_lower(sc, &reg);
   14425 	} else {
   14426 		/* CS on Microwire is active-high */
   14427 		reg &= ~(EECD_CS | EECD_DI);
   14428 		CSR_WRITE(sc, WMREG_EECD, reg);
   14429 		wm_nvm_eec_clock_raise(sc, &reg);
   14430 		wm_nvm_eec_clock_lower(sc, &reg);
   14431 	}
   14432 
   14433 	reg = CSR_READ(sc, WMREG_EECD);
   14434 	reg &= ~EECD_EE_REQ;
   14435 	CSR_WRITE(sc, WMREG_EECD, reg);
   14436 
   14437 	return;
   14438 }
   14439 
   14440 /*
   14441  * Get hardware semaphore.
   14442  * Same as e1000_get_hw_semaphore_generic()
   14443  */
   14444 static int
   14445 wm_get_swsm_semaphore(struct wm_softc *sc)
   14446 {
   14447 	int32_t timeout;
   14448 	uint32_t swsm;
   14449 
   14450 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14451 		device_xname(sc->sc_dev), __func__));
   14452 	KASSERT(sc->sc_nvm_wordsize > 0);
   14453 
   14454 retry:
   14455 	/* Get the SW semaphore. */
   14456 	timeout = sc->sc_nvm_wordsize + 1;
   14457 	while (timeout) {
   14458 		swsm = CSR_READ(sc, WMREG_SWSM);
   14459 
   14460 		if ((swsm & SWSM_SMBI) == 0)
   14461 			break;
   14462 
   14463 		delay(50);
   14464 		timeout--;
   14465 	}
   14466 
   14467 	if (timeout == 0) {
   14468 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14469 			/*
   14470 			 * In rare circumstances, the SW semaphore may already
   14471 			 * be held unintentionally. Clear the semaphore once
   14472 			 * before giving up.
   14473 			 */
   14474 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14475 			wm_put_swsm_semaphore(sc);
   14476 			goto retry;
   14477 		}
   14478 		aprint_error_dev(sc->sc_dev,
   14479 		    "could not acquire SWSM SMBI\n");
   14480 		return 1;
   14481 	}
   14482 
   14483 	/* Get the FW semaphore. */
   14484 	timeout = sc->sc_nvm_wordsize + 1;
   14485 	while (timeout) {
   14486 		swsm = CSR_READ(sc, WMREG_SWSM);
   14487 		swsm |= SWSM_SWESMBI;
   14488 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14489 		/* If we managed to set the bit we got the semaphore. */
   14490 		swsm = CSR_READ(sc, WMREG_SWSM);
   14491 		if (swsm & SWSM_SWESMBI)
   14492 			break;
   14493 
   14494 		delay(50);
   14495 		timeout--;
   14496 	}
   14497 
   14498 	if (timeout == 0) {
   14499 		aprint_error_dev(sc->sc_dev,
   14500 		    "could not acquire SWSM SWESMBI\n");
   14501 		/* Release semaphores */
   14502 		wm_put_swsm_semaphore(sc);
   14503 		return 1;
   14504 	}
   14505 	return 0;
   14506 }
   14507 
   14508 /*
   14509  * Put hardware semaphore.
   14510  * Same as e1000_put_hw_semaphore_generic()
   14511  */
   14512 static void
   14513 wm_put_swsm_semaphore(struct wm_softc *sc)
   14514 {
   14515 	uint32_t swsm;
   14516 
   14517 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14518 		device_xname(sc->sc_dev), __func__));
   14519 
   14520 	swsm = CSR_READ(sc, WMREG_SWSM);
   14521 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14522 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14523 }
   14524 
   14525 /*
   14526  * Get SW/FW semaphore.
   14527  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14528  */
   14529 static int
   14530 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14531 {
   14532 	uint32_t swfw_sync;
   14533 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14534 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14535 	int timeout;
   14536 
   14537 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14538 		device_xname(sc->sc_dev), __func__));
   14539 
   14540 	if (sc->sc_type == WM_T_80003)
   14541 		timeout = 50;
   14542 	else
   14543 		timeout = 200;
   14544 
   14545 	while (timeout) {
   14546 		if (wm_get_swsm_semaphore(sc)) {
   14547 			aprint_error_dev(sc->sc_dev,
   14548 			    "%s: failed to get semaphore\n",
   14549 			    __func__);
   14550 			return 1;
   14551 		}
   14552 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14553 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14554 			swfw_sync |= swmask;
   14555 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14556 			wm_put_swsm_semaphore(sc);
   14557 			return 0;
   14558 		}
   14559 		wm_put_swsm_semaphore(sc);
   14560 		delay(5000);
   14561 		timeout--;
   14562 	}
   14563 	device_printf(sc->sc_dev,
   14564 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14565 	    mask, swfw_sync);
   14566 	return 1;
   14567 }
   14568 
   14569 static void
   14570 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14571 {
   14572 	uint32_t swfw_sync;
   14573 
   14574 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14575 		device_xname(sc->sc_dev), __func__));
   14576 
   14577 	while (wm_get_swsm_semaphore(sc) != 0)
   14578 		continue;
   14579 
   14580 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14581 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14582 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14583 
   14584 	wm_put_swsm_semaphore(sc);
   14585 }
   14586 
   14587 static int
   14588 wm_get_nvm_80003(struct wm_softc *sc)
   14589 {
   14590 	int rv;
   14591 
   14592 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14593 		device_xname(sc->sc_dev), __func__));
   14594 
   14595 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14596 		aprint_error_dev(sc->sc_dev,
   14597 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14598 		return rv;
   14599 	}
   14600 
   14601 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14602 	    && (rv = wm_get_eecd(sc)) != 0) {
   14603 		aprint_error_dev(sc->sc_dev,
   14604 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14605 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14606 		return rv;
   14607 	}
   14608 
   14609 	return 0;
   14610 }
   14611 
   14612 static void
   14613 wm_put_nvm_80003(struct wm_softc *sc)
   14614 {
   14615 
   14616 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14617 		device_xname(sc->sc_dev), __func__));
   14618 
   14619 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14620 		wm_put_eecd(sc);
   14621 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14622 }
   14623 
   14624 static int
   14625 wm_get_nvm_82571(struct wm_softc *sc)
   14626 {
   14627 	int rv;
   14628 
   14629 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14630 		device_xname(sc->sc_dev), __func__));
   14631 
   14632 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14633 		return rv;
   14634 
   14635 	switch (sc->sc_type) {
   14636 	case WM_T_82573:
   14637 		break;
   14638 	default:
   14639 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14640 			rv = wm_get_eecd(sc);
   14641 		break;
   14642 	}
   14643 
   14644 	if (rv != 0) {
   14645 		aprint_error_dev(sc->sc_dev,
   14646 		    "%s: failed to get semaphore\n",
   14647 		    __func__);
   14648 		wm_put_swsm_semaphore(sc);
   14649 	}
   14650 
   14651 	return rv;
   14652 }
   14653 
   14654 static void
   14655 wm_put_nvm_82571(struct wm_softc *sc)
   14656 {
   14657 
   14658 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14659 		device_xname(sc->sc_dev), __func__));
   14660 
   14661 	switch (sc->sc_type) {
   14662 	case WM_T_82573:
   14663 		break;
   14664 	default:
   14665 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14666 			wm_put_eecd(sc);
   14667 		break;
   14668 	}
   14669 
   14670 	wm_put_swsm_semaphore(sc);
   14671 }
   14672 
   14673 static int
   14674 wm_get_phy_82575(struct wm_softc *sc)
   14675 {
   14676 
   14677 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14678 		device_xname(sc->sc_dev), __func__));
   14679 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14680 }
   14681 
   14682 static void
   14683 wm_put_phy_82575(struct wm_softc *sc)
   14684 {
   14685 
   14686 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14687 		device_xname(sc->sc_dev), __func__));
   14688 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14689 }
   14690 
   14691 static int
   14692 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14693 {
   14694 	uint32_t ext_ctrl;
   14695 	int timeout = 200;
   14696 
   14697 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14698 		device_xname(sc->sc_dev), __func__));
   14699 
   14700 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14701 	for (timeout = 0; timeout < 200; timeout++) {
   14702 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14703 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14704 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14705 
   14706 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14707 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14708 			return 0;
   14709 		delay(5000);
   14710 	}
   14711 	device_printf(sc->sc_dev,
   14712 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14713 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14714 	return 1;
   14715 }
   14716 
   14717 static void
   14718 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14719 {
   14720 	uint32_t ext_ctrl;
   14721 
   14722 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14723 		device_xname(sc->sc_dev), __func__));
   14724 
   14725 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14726 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14727 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14728 
   14729 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14730 }
   14731 
   14732 static int
   14733 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14734 {
   14735 	uint32_t ext_ctrl;
   14736 	int timeout;
   14737 
   14738 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14739 		device_xname(sc->sc_dev), __func__));
   14740 	mutex_enter(sc->sc_ich_phymtx);
   14741 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14742 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14743 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14744 			break;
   14745 		delay(1000);
   14746 	}
   14747 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14748 		device_printf(sc->sc_dev,
   14749 		    "SW has already locked the resource\n");
   14750 		goto out;
   14751 	}
   14752 
   14753 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14754 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14755 	for (timeout = 0; timeout < 1000; timeout++) {
   14756 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14757 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14758 			break;
   14759 		delay(1000);
   14760 	}
   14761 	if (timeout >= 1000) {
   14762 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14763 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14764 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14765 		goto out;
   14766 	}
   14767 	return 0;
   14768 
   14769 out:
   14770 	mutex_exit(sc->sc_ich_phymtx);
   14771 	return 1;
   14772 }
   14773 
   14774 static void
   14775 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14776 {
   14777 	uint32_t ext_ctrl;
   14778 
   14779 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14780 		device_xname(sc->sc_dev), __func__));
   14781 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14782 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14783 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14784 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14785 	} else {
   14786 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14787 	}
   14788 
   14789 	mutex_exit(sc->sc_ich_phymtx);
   14790 }
   14791 
   14792 static int
   14793 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14794 {
   14795 
   14796 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14797 		device_xname(sc->sc_dev), __func__));
   14798 	mutex_enter(sc->sc_ich_nvmmtx);
   14799 
   14800 	return 0;
   14801 }
   14802 
   14803 static void
   14804 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14805 {
   14806 
   14807 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14808 		device_xname(sc->sc_dev), __func__));
   14809 	mutex_exit(sc->sc_ich_nvmmtx);
   14810 }
   14811 
   14812 static int
   14813 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14814 {
   14815 	int i = 0;
   14816 	uint32_t reg;
   14817 
   14818 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14819 		device_xname(sc->sc_dev), __func__));
   14820 
   14821 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14822 	do {
   14823 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14824 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14825 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14826 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14827 			break;
   14828 		delay(2*1000);
   14829 		i++;
   14830 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14831 
   14832 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14833 		wm_put_hw_semaphore_82573(sc);
   14834 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14835 		    device_xname(sc->sc_dev));
   14836 		return -1;
   14837 	}
   14838 
   14839 	return 0;
   14840 }
   14841 
   14842 static void
   14843 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14844 {
   14845 	uint32_t reg;
   14846 
   14847 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14848 		device_xname(sc->sc_dev), __func__));
   14849 
   14850 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14851 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14852 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14853 }
   14854 
   14855 /*
   14856  * Management mode and power management related subroutines.
   14857  * BMC, AMT, suspend/resume and EEE.
   14858  */
   14859 
   14860 #ifdef WM_WOL
   14861 static int
   14862 wm_check_mng_mode(struct wm_softc *sc)
   14863 {
   14864 	int rv;
   14865 
   14866 	switch (sc->sc_type) {
   14867 	case WM_T_ICH8:
   14868 	case WM_T_ICH9:
   14869 	case WM_T_ICH10:
   14870 	case WM_T_PCH:
   14871 	case WM_T_PCH2:
   14872 	case WM_T_PCH_LPT:
   14873 	case WM_T_PCH_SPT:
   14874 	case WM_T_PCH_CNP:
   14875 		rv = wm_check_mng_mode_ich8lan(sc);
   14876 		break;
   14877 	case WM_T_82574:
   14878 	case WM_T_82583:
   14879 		rv = wm_check_mng_mode_82574(sc);
   14880 		break;
   14881 	case WM_T_82571:
   14882 	case WM_T_82572:
   14883 	case WM_T_82573:
   14884 	case WM_T_80003:
   14885 		rv = wm_check_mng_mode_generic(sc);
   14886 		break;
   14887 	default:
   14888 		/* Noting to do */
   14889 		rv = 0;
   14890 		break;
   14891 	}
   14892 
   14893 	return rv;
   14894 }
   14895 
   14896 static int
   14897 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14898 {
   14899 	uint32_t fwsm;
   14900 
   14901 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14902 
   14903 	if (((fwsm & FWSM_FW_VALID) != 0)
   14904 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14905 		return 1;
   14906 
   14907 	return 0;
   14908 }
   14909 
   14910 static int
   14911 wm_check_mng_mode_82574(struct wm_softc *sc)
   14912 {
   14913 	uint16_t data;
   14914 
   14915 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14916 
   14917 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14918 		return 1;
   14919 
   14920 	return 0;
   14921 }
   14922 
   14923 static int
   14924 wm_check_mng_mode_generic(struct wm_softc *sc)
   14925 {
   14926 	uint32_t fwsm;
   14927 
   14928 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14929 
   14930 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14931 		return 1;
   14932 
   14933 	return 0;
   14934 }
   14935 #endif /* WM_WOL */
   14936 
   14937 static int
   14938 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14939 {
   14940 	uint32_t manc, fwsm, factps;
   14941 
   14942 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14943 		return 0;
   14944 
   14945 	manc = CSR_READ(sc, WMREG_MANC);
   14946 
   14947 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14948 		device_xname(sc->sc_dev), manc));
   14949 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14950 		return 0;
   14951 
   14952 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14953 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14954 		factps = CSR_READ(sc, WMREG_FACTPS);
   14955 		if (((factps & FACTPS_MNGCG) == 0)
   14956 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14957 			return 1;
   14958 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14959 		uint16_t data;
   14960 
   14961 		factps = CSR_READ(sc, WMREG_FACTPS);
   14962 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14963 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14964 			device_xname(sc->sc_dev), factps, data));
   14965 		if (((factps & FACTPS_MNGCG) == 0)
   14966 		    && ((data & NVM_CFG2_MNGM_MASK)
   14967 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14968 			return 1;
   14969 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14970 	    && ((manc & MANC_ASF_EN) == 0))
   14971 		return 1;
   14972 
   14973 	return 0;
   14974 }
   14975 
   14976 static bool
   14977 wm_phy_resetisblocked(struct wm_softc *sc)
   14978 {
   14979 	bool blocked = false;
   14980 	uint32_t reg;
   14981 	int i = 0;
   14982 
   14983 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14984 		device_xname(sc->sc_dev), __func__));
   14985 
   14986 	switch (sc->sc_type) {
   14987 	case WM_T_ICH8:
   14988 	case WM_T_ICH9:
   14989 	case WM_T_ICH10:
   14990 	case WM_T_PCH:
   14991 	case WM_T_PCH2:
   14992 	case WM_T_PCH_LPT:
   14993 	case WM_T_PCH_SPT:
   14994 	case WM_T_PCH_CNP:
   14995 		do {
   14996 			reg = CSR_READ(sc, WMREG_FWSM);
   14997 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14998 				blocked = true;
   14999 				delay(10*1000);
   15000 				continue;
   15001 			}
   15002 			blocked = false;
   15003 		} while (blocked && (i++ < 30));
   15004 		return blocked;
   15005 		break;
   15006 	case WM_T_82571:
   15007 	case WM_T_82572:
   15008 	case WM_T_82573:
   15009 	case WM_T_82574:
   15010 	case WM_T_82583:
   15011 	case WM_T_80003:
   15012 		reg = CSR_READ(sc, WMREG_MANC);
   15013 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15014 			return true;
   15015 		else
   15016 			return false;
   15017 		break;
   15018 	default:
   15019 		/* No problem */
   15020 		break;
   15021 	}
   15022 
   15023 	return false;
   15024 }
   15025 
   15026 static void
   15027 wm_get_hw_control(struct wm_softc *sc)
   15028 {
   15029 	uint32_t reg;
   15030 
   15031 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15032 		device_xname(sc->sc_dev), __func__));
   15033 
   15034 	if (sc->sc_type == WM_T_82573) {
   15035 		reg = CSR_READ(sc, WMREG_SWSM);
   15036 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15037 	} else if (sc->sc_type >= WM_T_82571) {
   15038 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15040 	}
   15041 }
   15042 
   15043 static void
   15044 wm_release_hw_control(struct wm_softc *sc)
   15045 {
   15046 	uint32_t reg;
   15047 
   15048 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15049 		device_xname(sc->sc_dev), __func__));
   15050 
   15051 	if (sc->sc_type == WM_T_82573) {
   15052 		reg = CSR_READ(sc, WMREG_SWSM);
   15053 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15054 	} else if (sc->sc_type >= WM_T_82571) {
   15055 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15056 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15057 	}
   15058 }
   15059 
   15060 static void
   15061 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15062 {
   15063 	uint32_t reg;
   15064 
   15065 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15066 		device_xname(sc->sc_dev), __func__));
   15067 
   15068 	if (sc->sc_type < WM_T_PCH2)
   15069 		return;
   15070 
   15071 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15072 
   15073 	if (gate)
   15074 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15075 	else
   15076 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15077 
   15078 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15079 }
   15080 
   15081 static int
   15082 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15083 {
   15084 	uint32_t fwsm, reg;
   15085 	int rv = 0;
   15086 
   15087 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15088 		device_xname(sc->sc_dev), __func__));
   15089 
   15090 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15091 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15092 
   15093 	/* Disable ULP */
   15094 	wm_ulp_disable(sc);
   15095 
   15096 	/* Acquire PHY semaphore */
   15097 	rv = sc->phy.acquire(sc);
   15098 	if (rv != 0) {
   15099 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15100 		device_xname(sc->sc_dev), __func__));
   15101 		return -1;
   15102 	}
   15103 
   15104 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15105 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15106 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15107 	 */
   15108 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15109 	switch (sc->sc_type) {
   15110 	case WM_T_PCH_LPT:
   15111 	case WM_T_PCH_SPT:
   15112 	case WM_T_PCH_CNP:
   15113 		if (wm_phy_is_accessible_pchlan(sc))
   15114 			break;
   15115 
   15116 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15117 		 * forcing MAC to SMBus mode first.
   15118 		 */
   15119 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15120 		reg |= CTRL_EXT_FORCE_SMBUS;
   15121 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15122 #if 0
   15123 		/* XXX Isn't this required??? */
   15124 		CSR_WRITE_FLUSH(sc);
   15125 #endif
   15126 		/* Wait 50 milliseconds for MAC to finish any retries
   15127 		 * that it might be trying to perform from previous
   15128 		 * attempts to acknowledge any phy read requests.
   15129 		 */
   15130 		delay(50 * 1000);
   15131 		/* FALLTHROUGH */
   15132 	case WM_T_PCH2:
   15133 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15134 			break;
   15135 		/* FALLTHROUGH */
   15136 	case WM_T_PCH:
   15137 		if (sc->sc_type == WM_T_PCH)
   15138 			if ((fwsm & FWSM_FW_VALID) != 0)
   15139 				break;
   15140 
   15141 		if (wm_phy_resetisblocked(sc) == true) {
   15142 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15143 			break;
   15144 		}
   15145 
   15146 		/* Toggle LANPHYPC Value bit */
   15147 		wm_toggle_lanphypc_pch_lpt(sc);
   15148 
   15149 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15150 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15151 				break;
   15152 
   15153 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15154 			 * so ensure that the MAC is also out of SMBus mode
   15155 			 */
   15156 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15157 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15158 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15159 
   15160 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15161 				break;
   15162 			rv = -1;
   15163 		}
   15164 		break;
   15165 	default:
   15166 		break;
   15167 	}
   15168 
   15169 	/* Release semaphore */
   15170 	sc->phy.release(sc);
   15171 
   15172 	if (rv == 0) {
   15173 		/* Check to see if able to reset PHY.  Print error if not */
   15174 		if (wm_phy_resetisblocked(sc)) {
   15175 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15176 			goto out;
   15177 		}
   15178 
   15179 		/* Reset the PHY before any access to it.  Doing so, ensures
   15180 		 * that the PHY is in a known good state before we read/write
   15181 		 * PHY registers.  The generic reset is sufficient here,
   15182 		 * because we haven't determined the PHY type yet.
   15183 		 */
   15184 		if (wm_reset_phy(sc) != 0)
   15185 			goto out;
   15186 
   15187 		/* On a successful reset, possibly need to wait for the PHY
   15188 		 * to quiesce to an accessible state before returning control
   15189 		 * to the calling function.  If the PHY does not quiesce, then
   15190 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   15191 		 *  the PHY is in.
   15192 		 */
   15193 		if (wm_phy_resetisblocked(sc))
   15194 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15195 	}
   15196 
   15197 out:
   15198 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15199 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15200 		delay(10*1000);
   15201 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15202 	}
   15203 
   15204 	return 0;
   15205 }
   15206 
   15207 static void
   15208 wm_init_manageability(struct wm_softc *sc)
   15209 {
   15210 
   15211 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15212 		device_xname(sc->sc_dev), __func__));
   15213 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15214 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15215 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15216 
   15217 		/* Disable hardware interception of ARP */
   15218 		manc &= ~MANC_ARP_EN;
   15219 
   15220 		/* Enable receiving management packets to the host */
   15221 		if (sc->sc_type >= WM_T_82571) {
   15222 			manc |= MANC_EN_MNG2HOST;
   15223 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15224 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15225 		}
   15226 
   15227 		CSR_WRITE(sc, WMREG_MANC, manc);
   15228 	}
   15229 }
   15230 
   15231 static void
   15232 wm_release_manageability(struct wm_softc *sc)
   15233 {
   15234 
   15235 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15236 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15237 
   15238 		manc |= MANC_ARP_EN;
   15239 		if (sc->sc_type >= WM_T_82571)
   15240 			manc &= ~MANC_EN_MNG2HOST;
   15241 
   15242 		CSR_WRITE(sc, WMREG_MANC, manc);
   15243 	}
   15244 }
   15245 
   15246 static void
   15247 wm_get_wakeup(struct wm_softc *sc)
   15248 {
   15249 
   15250 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15251 	switch (sc->sc_type) {
   15252 	case WM_T_82573:
   15253 	case WM_T_82583:
   15254 		sc->sc_flags |= WM_F_HAS_AMT;
   15255 		/* FALLTHROUGH */
   15256 	case WM_T_80003:
   15257 	case WM_T_82575:
   15258 	case WM_T_82576:
   15259 	case WM_T_82580:
   15260 	case WM_T_I350:
   15261 	case WM_T_I354:
   15262 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15263 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15264 		/* FALLTHROUGH */
   15265 	case WM_T_82541:
   15266 	case WM_T_82541_2:
   15267 	case WM_T_82547:
   15268 	case WM_T_82547_2:
   15269 	case WM_T_82571:
   15270 	case WM_T_82572:
   15271 	case WM_T_82574:
   15272 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15273 		break;
   15274 	case WM_T_ICH8:
   15275 	case WM_T_ICH9:
   15276 	case WM_T_ICH10:
   15277 	case WM_T_PCH:
   15278 	case WM_T_PCH2:
   15279 	case WM_T_PCH_LPT:
   15280 	case WM_T_PCH_SPT:
   15281 	case WM_T_PCH_CNP:
   15282 		sc->sc_flags |= WM_F_HAS_AMT;
   15283 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15284 		break;
   15285 	default:
   15286 		break;
   15287 	}
   15288 
   15289 	/* 1: HAS_MANAGE */
   15290 	if (wm_enable_mng_pass_thru(sc) != 0)
   15291 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15292 
   15293 	/*
   15294 	 * Note that the WOL flags is set after the resetting of the eeprom
   15295 	 * stuff
   15296 	 */
   15297 }
   15298 
   15299 /*
   15300  * Unconfigure Ultra Low Power mode.
   15301  * Only for I217 and newer (see below).
   15302  */
   15303 static int
   15304 wm_ulp_disable(struct wm_softc *sc)
   15305 {
   15306 	uint32_t reg;
   15307 	uint16_t phyreg;
   15308 	int i = 0, rv = 0;
   15309 
   15310 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15311 		device_xname(sc->sc_dev), __func__));
   15312 	/* Exclude old devices */
   15313 	if ((sc->sc_type < WM_T_PCH_LPT)
   15314 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15315 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15316 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15317 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15318 		return 0;
   15319 
   15320 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15321 		/* Request ME un-configure ULP mode in the PHY */
   15322 		reg = CSR_READ(sc, WMREG_H2ME);
   15323 		reg &= ~H2ME_ULP;
   15324 		reg |= H2ME_ENFORCE_SETTINGS;
   15325 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15326 
   15327 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15328 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15329 			if (i++ == 30) {
   15330 				device_printf(sc->sc_dev, "%s timed out\n",
   15331 				    __func__);
   15332 				return -1;
   15333 			}
   15334 			delay(10 * 1000);
   15335 		}
   15336 		reg = CSR_READ(sc, WMREG_H2ME);
   15337 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15338 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15339 
   15340 		return 0;
   15341 	}
   15342 
   15343 	/* Acquire semaphore */
   15344 	rv = sc->phy.acquire(sc);
   15345 	if (rv != 0) {
   15346 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15347 		device_xname(sc->sc_dev), __func__));
   15348 		return -1;
   15349 	}
   15350 
   15351 	/* Toggle LANPHYPC */
   15352 	wm_toggle_lanphypc_pch_lpt(sc);
   15353 
   15354 	/* Unforce SMBus mode in PHY */
   15355 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15356 	if (rv != 0) {
   15357 		uint32_t reg2;
   15358 
   15359 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15360 			__func__);
   15361 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15362 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15363 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15364 		delay(50 * 1000);
   15365 
   15366 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15367 		    &phyreg);
   15368 		if (rv != 0)
   15369 			goto release;
   15370 	}
   15371 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15372 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15373 
   15374 	/* Unforce SMBus mode in MAC */
   15375 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15376 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15377 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15378 
   15379 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15380 	if (rv != 0)
   15381 		goto release;
   15382 	phyreg |= HV_PM_CTRL_K1_ENA;
   15383 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15384 
   15385 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15386 		&phyreg);
   15387 	if (rv != 0)
   15388 		goto release;
   15389 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15390 	    | I218_ULP_CONFIG1_STICKY_ULP
   15391 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15392 	    | I218_ULP_CONFIG1_WOL_HOST
   15393 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15394 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15395 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15396 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15397 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15398 	phyreg |= I218_ULP_CONFIG1_START;
   15399 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15400 
   15401 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15402 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15403 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15404 
   15405 release:
   15406 	/* Release semaphore */
   15407 	sc->phy.release(sc);
   15408 	wm_gmii_reset(sc);
   15409 	delay(50 * 1000);
   15410 
   15411 	return rv;
   15412 }
   15413 
   15414 /* WOL in the newer chipset interfaces (pchlan) */
   15415 static int
   15416 wm_enable_phy_wakeup(struct wm_softc *sc)
   15417 {
   15418 	device_t dev = sc->sc_dev;
   15419 	uint32_t mreg, moff;
   15420 	uint16_t wuce, wuc, wufc, preg;
   15421 	int i, rv;
   15422 
   15423 	KASSERT(sc->sc_type >= WM_T_PCH);
   15424 
   15425 	/* Copy MAC RARs to PHY RARs */
   15426 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15427 
   15428 	/* Activate PHY wakeup */
   15429 	rv = sc->phy.acquire(sc);
   15430 	if (rv != 0) {
   15431 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15432 		    __func__);
   15433 		return rv;
   15434 	}
   15435 
   15436 	/*
   15437 	 * Enable access to PHY wakeup registers.
   15438 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15439 	 */
   15440 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15441 	if (rv != 0) {
   15442 		device_printf(dev,
   15443 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15444 		goto release;
   15445 	}
   15446 
   15447 	/* Copy MAC MTA to PHY MTA */
   15448 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15449 		uint16_t lo, hi;
   15450 
   15451 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15452 		lo = (uint16_t)(mreg & 0xffff);
   15453 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15454 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15455 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15456 	}
   15457 
   15458 	/* Configure PHY Rx Control register */
   15459 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15460 	mreg = CSR_READ(sc, WMREG_RCTL);
   15461 	if (mreg & RCTL_UPE)
   15462 		preg |= BM_RCTL_UPE;
   15463 	if (mreg & RCTL_MPE)
   15464 		preg |= BM_RCTL_MPE;
   15465 	preg &= ~(BM_RCTL_MO_MASK);
   15466 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15467 	if (moff != 0)
   15468 		preg |= moff << BM_RCTL_MO_SHIFT;
   15469 	if (mreg & RCTL_BAM)
   15470 		preg |= BM_RCTL_BAM;
   15471 	if (mreg & RCTL_PMCF)
   15472 		preg |= BM_RCTL_PMCF;
   15473 	mreg = CSR_READ(sc, WMREG_CTRL);
   15474 	if (mreg & CTRL_RFCE)
   15475 		preg |= BM_RCTL_RFCE;
   15476 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15477 
   15478 	wuc = WUC_APME | WUC_PME_EN;
   15479 	wufc = WUFC_MAG;
   15480 	/* Enable PHY wakeup in MAC register */
   15481 	CSR_WRITE(sc, WMREG_WUC,
   15482 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15483 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15484 
   15485 	/* Configure and enable PHY wakeup in PHY registers */
   15486 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15487 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15488 
   15489 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15490 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15491 
   15492 release:
   15493 	sc->phy.release(sc);
   15494 
   15495 	return 0;
   15496 }
   15497 
   15498 /* Power down workaround on D3 */
   15499 static void
   15500 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15501 {
   15502 	uint32_t reg;
   15503 	uint16_t phyreg;
   15504 	int i;
   15505 
   15506 	for (i = 0; i < 2; i++) {
   15507 		/* Disable link */
   15508 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15509 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15510 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15511 
   15512 		/*
   15513 		 * Call gig speed drop workaround on Gig disable before
   15514 		 * accessing any PHY registers
   15515 		 */
   15516 		if (sc->sc_type == WM_T_ICH8)
   15517 			wm_gig_downshift_workaround_ich8lan(sc);
   15518 
   15519 		/* Write VR power-down enable */
   15520 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15521 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15522 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15523 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15524 
   15525 		/* Read it back and test */
   15526 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15527 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15528 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15529 			break;
   15530 
   15531 		/* Issue PHY reset and repeat at most one more time */
   15532 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15533 	}
   15534 }
   15535 
   15536 /*
   15537  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15538  *  @sc: pointer to the HW structure
   15539  *
   15540  *  During S0 to Sx transition, it is possible the link remains at gig
   15541  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15542  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15543  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15544  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15545  *  needs to be written.
   15546  *  Parts that support (and are linked to a partner which support) EEE in
   15547  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15548  *  than 10Mbps w/o EEE.
   15549  */
   15550 static void
   15551 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15552 {
   15553 	device_t dev = sc->sc_dev;
   15554 	struct ethercom *ec = &sc->sc_ethercom;
   15555 	uint32_t phy_ctrl;
   15556 	int rv;
   15557 
   15558 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15559 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15560 
   15561 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15562 
   15563 	if (sc->sc_phytype == WMPHY_I217) {
   15564 		uint16_t devid = sc->sc_pcidevid;
   15565 
   15566 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15567 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15568 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15569 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15570 		    (sc->sc_type >= WM_T_PCH_SPT))
   15571 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15572 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15573 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15574 
   15575 		if (sc->phy.acquire(sc) != 0)
   15576 			goto out;
   15577 
   15578 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15579 			uint16_t eee_advert;
   15580 
   15581 			rv = wm_read_emi_reg_locked(dev,
   15582 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15583 			if (rv)
   15584 				goto release;
   15585 
   15586 			/*
   15587 			 * Disable LPLU if both link partners support 100BaseT
   15588 			 * EEE and 100Full is advertised on both ends of the
   15589 			 * link, and enable Auto Enable LPI since there will
   15590 			 * be no driver to enable LPI while in Sx.
   15591 			 */
   15592 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15593 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15594 				uint16_t anar, phy_reg;
   15595 
   15596 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15597 				    &anar);
   15598 				if (anar & ANAR_TX_FD) {
   15599 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15600 					    PHY_CTRL_NOND0A_LPLU);
   15601 
   15602 					/* Set Auto Enable LPI after link up */
   15603 					sc->phy.readreg_locked(dev, 2,
   15604 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15605 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15606 					sc->phy.writereg_locked(dev, 2,
   15607 					    I217_LPI_GPIO_CTRL, phy_reg);
   15608 				}
   15609 			}
   15610 		}
   15611 
   15612 		/*
   15613 		 * For i217 Intel Rapid Start Technology support,
   15614 		 * when the system is going into Sx and no manageability engine
   15615 		 * is present, the driver must configure proxy to reset only on
   15616 		 * power good.	LPI (Low Power Idle) state must also reset only
   15617 		 * on power good, as well as the MTA (Multicast table array).
   15618 		 * The SMBus release must also be disabled on LCD reset.
   15619 		 */
   15620 
   15621 		/*
   15622 		 * Enable MTA to reset for Intel Rapid Start Technology
   15623 		 * Support
   15624 		 */
   15625 
   15626 release:
   15627 		sc->phy.release(sc);
   15628 	}
   15629 out:
   15630 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15631 
   15632 	if (sc->sc_type == WM_T_ICH8)
   15633 		wm_gig_downshift_workaround_ich8lan(sc);
   15634 
   15635 	if (sc->sc_type >= WM_T_PCH) {
   15636 		wm_oem_bits_config_ich8lan(sc, false);
   15637 
   15638 		/* Reset PHY to activate OEM bits on 82577/8 */
   15639 		if (sc->sc_type == WM_T_PCH)
   15640 			wm_reset_phy(sc);
   15641 
   15642 		if (sc->phy.acquire(sc) != 0)
   15643 			return;
   15644 		wm_write_smbus_addr(sc);
   15645 		sc->phy.release(sc);
   15646 	}
   15647 }
   15648 
   15649 /*
   15650  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15651  *  @sc: pointer to the HW structure
   15652  *
   15653  *  During Sx to S0 transitions on non-managed devices or managed devices
   15654  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15655  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15656  *  the PHY.
   15657  *  On i217, setup Intel Rapid Start Technology.
   15658  */
   15659 static int
   15660 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15661 {
   15662 	device_t dev = sc->sc_dev;
   15663 	int rv;
   15664 
   15665 	if (sc->sc_type < WM_T_PCH2)
   15666 		return 0;
   15667 
   15668 	rv = wm_init_phy_workarounds_pchlan(sc);
   15669 	if (rv != 0)
   15670 		return -1;
   15671 
   15672 	/* For i217 Intel Rapid Start Technology support when the system
   15673 	 * is transitioning from Sx and no manageability engine is present
   15674 	 * configure SMBus to restore on reset, disable proxy, and enable
   15675 	 * the reset on MTA (Multicast table array).
   15676 	 */
   15677 	if (sc->sc_phytype == WMPHY_I217) {
   15678 		uint16_t phy_reg;
   15679 
   15680 		if (sc->phy.acquire(sc) != 0)
   15681 			return -1;
   15682 
   15683 		/* Clear Auto Enable LPI after link up */
   15684 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15685 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15686 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15687 
   15688 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15689 			/* Restore clear on SMB if no manageability engine
   15690 			 * is present
   15691 			 */
   15692 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15693 			    &phy_reg);
   15694 			if (rv != 0)
   15695 				goto release;
   15696 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15697 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15698 
   15699 			/* Disable Proxy */
   15700 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15701 		}
   15702 		/* Enable reset on MTA */
   15703 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15704 		if (rv != 0)
   15705 			goto release;
   15706 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15707 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15708 
   15709 release:
   15710 		sc->phy.release(sc);
   15711 		return rv;
   15712 	}
   15713 
   15714 	return 0;
   15715 }
   15716 
   15717 static void
   15718 wm_enable_wakeup(struct wm_softc *sc)
   15719 {
   15720 	uint32_t reg, pmreg;
   15721 	pcireg_t pmode;
   15722 	int rv = 0;
   15723 
   15724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15725 		device_xname(sc->sc_dev), __func__));
   15726 
   15727 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15728 	    &pmreg, NULL) == 0)
   15729 		return;
   15730 
   15731 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15732 		goto pme;
   15733 
   15734 	/* Advertise the wakeup capability */
   15735 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15736 	    | CTRL_SWDPIN(3));
   15737 
   15738 	/* Keep the laser running on fiber adapters */
   15739 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15740 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15741 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15742 		reg |= CTRL_EXT_SWDPIN(3);
   15743 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15744 	}
   15745 
   15746 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15747 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15748 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15749 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15750 		wm_suspend_workarounds_ich8lan(sc);
   15751 
   15752 #if 0	/* For the multicast packet */
   15753 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15754 	reg |= WUFC_MC;
   15755 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15756 #endif
   15757 
   15758 	if (sc->sc_type >= WM_T_PCH) {
   15759 		rv = wm_enable_phy_wakeup(sc);
   15760 		if (rv != 0)
   15761 			goto pme;
   15762 	} else {
   15763 		/* Enable wakeup by the MAC */
   15764 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15765 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15766 	}
   15767 
   15768 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15769 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15770 		|| (sc->sc_type == WM_T_PCH2))
   15771 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15772 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15773 
   15774 pme:
   15775 	/* Request PME */
   15776 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15777 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15778 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15779 		/* For WOL */
   15780 		pmode |= PCI_PMCSR_PME_EN;
   15781 	} else {
   15782 		/* Disable WOL */
   15783 		pmode &= ~PCI_PMCSR_PME_EN;
   15784 	}
   15785 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15786 }
   15787 
   15788 /* Disable ASPM L0s and/or L1 for workaround */
   15789 static void
   15790 wm_disable_aspm(struct wm_softc *sc)
   15791 {
   15792 	pcireg_t reg, mask = 0;
   15793 	unsigned const char *str = "";
   15794 
   15795 	/*
   15796 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15797 	 * space.
   15798 	 */
   15799 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15800 		return;
   15801 
   15802 	switch (sc->sc_type) {
   15803 	case WM_T_82571:
   15804 	case WM_T_82572:
   15805 		/*
   15806 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15807 		 * State Power management L1 State (ASPM L1).
   15808 		 */
   15809 		mask = PCIE_LCSR_ASPM_L1;
   15810 		str = "L1 is";
   15811 		break;
   15812 	case WM_T_82573:
   15813 	case WM_T_82574:
   15814 	case WM_T_82583:
   15815 		/*
   15816 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15817 		 *
   15818 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
   15819 		 * some chipset.  The document of 82574 and 82583 says that
   15820 		 * disabling L0s with some specific chipset is sufficient,
   15821 		 * but we follow as of the Intel em driver does.
   15822 		 *
   15823 		 * References:
   15824 		 * Errata 8 of the Specification Update of i82573.
   15825 		 * Errata 20 of the Specification Update of i82574.
   15826 		 * Errata 9 of the Specification Update of i82583.
   15827 		 */
   15828 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15829 		str = "L0s and L1 are";
   15830 		break;
   15831 	default:
   15832 		return;
   15833 	}
   15834 
   15835 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15836 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15837 	reg &= ~mask;
   15838 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15839 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15840 
   15841 	/* Print only in wm_attach() */
   15842 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15843 		aprint_verbose_dev(sc->sc_dev,
   15844 		    "ASPM %s disabled to workaround the errata.\n", str);
   15845 }
   15846 
   15847 /* LPLU */
   15848 
   15849 static void
   15850 wm_lplu_d0_disable(struct wm_softc *sc)
   15851 {
   15852 	struct mii_data *mii = &sc->sc_mii;
   15853 	uint32_t reg;
   15854 	uint16_t phyval;
   15855 
   15856 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15857 		device_xname(sc->sc_dev), __func__));
   15858 
   15859 	if (sc->sc_phytype == WMPHY_IFE)
   15860 		return;
   15861 
   15862 	switch (sc->sc_type) {
   15863 	case WM_T_82571:
   15864 	case WM_T_82572:
   15865 	case WM_T_82573:
   15866 	case WM_T_82575:
   15867 	case WM_T_82576:
   15868 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15869 		phyval &= ~PMR_D0_LPLU;
   15870 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15871 		break;
   15872 	case WM_T_82580:
   15873 	case WM_T_I350:
   15874 	case WM_T_I210:
   15875 	case WM_T_I211:
   15876 		reg = CSR_READ(sc, WMREG_PHPM);
   15877 		reg &= ~PHPM_D0A_LPLU;
   15878 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15879 		break;
   15880 	case WM_T_82574:
   15881 	case WM_T_82583:
   15882 	case WM_T_ICH8:
   15883 	case WM_T_ICH9:
   15884 	case WM_T_ICH10:
   15885 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15886 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15887 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15888 		CSR_WRITE_FLUSH(sc);
   15889 		break;
   15890 	case WM_T_PCH:
   15891 	case WM_T_PCH2:
   15892 	case WM_T_PCH_LPT:
   15893 	case WM_T_PCH_SPT:
   15894 	case WM_T_PCH_CNP:
   15895 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15896 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15897 		if (wm_phy_resetisblocked(sc) == false)
   15898 			phyval |= HV_OEM_BITS_ANEGNOW;
   15899 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15900 		break;
   15901 	default:
   15902 		break;
   15903 	}
   15904 }
   15905 
   15906 /* EEE */
   15907 
   15908 static int
   15909 wm_set_eee_i350(struct wm_softc *sc)
   15910 {
   15911 	struct ethercom *ec = &sc->sc_ethercom;
   15912 	uint32_t ipcnfg, eeer;
   15913 	uint32_t ipcnfg_mask
   15914 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15915 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15916 
   15917 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15918 
   15919 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15920 	eeer = CSR_READ(sc, WMREG_EEER);
   15921 
   15922 	/* Enable or disable per user setting */
   15923 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15924 		ipcnfg |= ipcnfg_mask;
   15925 		eeer |= eeer_mask;
   15926 	} else {
   15927 		ipcnfg &= ~ipcnfg_mask;
   15928 		eeer &= ~eeer_mask;
   15929 	}
   15930 
   15931 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15932 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15933 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15934 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15935 
   15936 	return 0;
   15937 }
   15938 
   15939 static int
   15940 wm_set_eee_pchlan(struct wm_softc *sc)
   15941 {
   15942 	device_t dev = sc->sc_dev;
   15943 	struct ethercom *ec = &sc->sc_ethercom;
   15944 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15945 	int rv = 0;
   15946 
   15947 	switch (sc->sc_phytype) {
   15948 	case WMPHY_82579:
   15949 		lpa = I82579_EEE_LP_ABILITY;
   15950 		pcs_status = I82579_EEE_PCS_STATUS;
   15951 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15952 		break;
   15953 	case WMPHY_I217:
   15954 		lpa = I217_EEE_LP_ABILITY;
   15955 		pcs_status = I217_EEE_PCS_STATUS;
   15956 		adv_addr = I217_EEE_ADVERTISEMENT;
   15957 		break;
   15958 	default:
   15959 		return 0;
   15960 	}
   15961 
   15962 	if (sc->phy.acquire(sc)) {
   15963 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15964 		return 0;
   15965 	}
   15966 
   15967 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15968 	if (rv != 0)
   15969 		goto release;
   15970 
   15971 	/* Clear bits that enable EEE in various speeds */
   15972 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15973 
   15974 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15975 		/* Save off link partner's EEE ability */
   15976 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15977 		if (rv != 0)
   15978 			goto release;
   15979 
   15980 		/* Read EEE advertisement */
   15981 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15982 			goto release;
   15983 
   15984 		/*
   15985 		 * Enable EEE only for speeds in which the link partner is
   15986 		 * EEE capable and for which we advertise EEE.
   15987 		 */
   15988 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15989 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15990 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15991 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15992 			if ((data & ANLPAR_TX_FD) != 0)
   15993 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15994 			else {
   15995 				/*
   15996 				 * EEE is not supported in 100Half, so ignore
   15997 				 * partner's EEE in 100 ability if full-duplex
   15998 				 * is not advertised.
   15999 				 */
   16000 				sc->eee_lp_ability
   16001 				    &= ~AN_EEEADVERT_100_TX;
   16002 			}
   16003 		}
   16004 	}
   16005 
   16006 	if (sc->sc_phytype == WMPHY_82579) {
   16007 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16008 		if (rv != 0)
   16009 			goto release;
   16010 
   16011 		data &= ~I82579_LPI_PLL_SHUT_100;
   16012 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16013 	}
   16014 
   16015 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16016 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16017 		goto release;
   16018 
   16019 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16020 release:
   16021 	sc->phy.release(sc);
   16022 
   16023 	return rv;
   16024 }
   16025 
   16026 static int
   16027 wm_set_eee(struct wm_softc *sc)
   16028 {
   16029 	struct ethercom *ec = &sc->sc_ethercom;
   16030 
   16031 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16032 		return 0;
   16033 
   16034 	if (sc->sc_type == WM_T_I354) {
   16035 		/* I354 uses an external PHY */
   16036 		return 0; /* not yet */
   16037 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16038 		return wm_set_eee_i350(sc);
   16039 	else if (sc->sc_type >= WM_T_PCH2)
   16040 		return wm_set_eee_pchlan(sc);
   16041 
   16042 	return 0;
   16043 }
   16044 
   16045 /*
   16046  * Workarounds (mainly PHY related).
   16047  * Basically, PHY's workarounds are in the PHY drivers.
   16048  */
   16049 
   16050 /* Work-around for 82566 Kumeran PCS lock loss */
   16051 static int
   16052 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16053 {
   16054 	struct mii_data *mii = &sc->sc_mii;
   16055 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16056 	int i, reg, rv;
   16057 	uint16_t phyreg;
   16058 
   16059 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16060 		device_xname(sc->sc_dev), __func__));
   16061 
   16062 	/* If the link is not up, do nothing */
   16063 	if ((status & STATUS_LU) == 0)
   16064 		return 0;
   16065 
   16066 	/* Nothing to do if the link is other than 1Gbps */
   16067 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16068 		return 0;
   16069 
   16070 	for (i = 0; i < 10; i++) {
   16071 		/* read twice */
   16072 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16073 		if (rv != 0)
   16074 			return rv;
   16075 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16076 		if (rv != 0)
   16077 			return rv;
   16078 
   16079 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16080 			goto out;	/* GOOD! */
   16081 
   16082 		/* Reset the PHY */
   16083 		wm_reset_phy(sc);
   16084 		delay(5*1000);
   16085 	}
   16086 
   16087 	/* Disable GigE link negotiation */
   16088 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16089 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16090 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16091 
   16092 	/*
   16093 	 * Call gig speed drop workaround on Gig disable before accessing
   16094 	 * any PHY registers.
   16095 	 */
   16096 	wm_gig_downshift_workaround_ich8lan(sc);
   16097 
   16098 out:
   16099 	return 0;
   16100 }
   16101 
   16102 /*
   16103  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16104  *  @sc: pointer to the HW structure
   16105  *
   16106  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   16107  *  LPLU, Gig disable, MDIC PHY reset):
   16108  *    1) Set Kumeran Near-end loopback
   16109  *    2) Clear Kumeran Near-end loopback
   16110  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16111  */
   16112 static void
   16113 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16114 {
   16115 	uint16_t kmreg;
   16116 
   16117 	/* Only for igp3 */
   16118 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16119 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16120 			return;
   16121 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16122 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16123 			return;
   16124 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16125 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16126 	}
   16127 }
   16128 
   16129 /*
   16130  * Workaround for pch's PHYs
   16131  * XXX should be moved to new PHY driver?
   16132  */
   16133 static int
   16134 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16135 {
   16136 	device_t dev = sc->sc_dev;
   16137 	struct mii_data *mii = &sc->sc_mii;
   16138 	struct mii_softc *child;
   16139 	uint16_t phy_data, phyrev = 0;
   16140 	int phytype = sc->sc_phytype;
   16141 	int rv;
   16142 
   16143 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16144 		device_xname(dev), __func__));
   16145 	KASSERT(sc->sc_type == WM_T_PCH);
   16146 
   16147 	/* Set MDIO slow mode before any other MDIO access */
   16148 	if (phytype == WMPHY_82577)
   16149 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16150 			return rv;
   16151 
   16152 	child = LIST_FIRST(&mii->mii_phys);
   16153 	if (child != NULL)
   16154 		phyrev = child->mii_mpd_rev;
   16155 
   16156 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   16157 	if ((child != NULL) &&
   16158 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16159 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16160 		/* Disable generation of early preamble (0x4431) */
   16161 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16162 		    &phy_data);
   16163 		if (rv != 0)
   16164 			return rv;
   16165 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16166 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16167 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16168 		    phy_data);
   16169 		if (rv != 0)
   16170 			return rv;
   16171 
   16172 		/* Preamble tuning for SSC */
   16173 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16174 		if (rv != 0)
   16175 			return rv;
   16176 	}
   16177 
   16178 	/* 82578 */
   16179 	if (phytype == WMPHY_82578) {
   16180 		/*
   16181 		 * Return registers to default by doing a soft reset then
   16182 		 * writing 0x3140 to the control register
   16183 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16184 		 */
   16185 		if ((child != NULL) && (phyrev < 2)) {
   16186 			PHY_RESET(child);
   16187 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16188 			if (rv != 0)
   16189 				return rv;
   16190 		}
   16191 	}
   16192 
   16193 	/* Select page 0 */
   16194 	if ((rv = sc->phy.acquire(sc)) != 0)
   16195 		return rv;
   16196 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16197 	sc->phy.release(sc);
   16198 	if (rv != 0)
   16199 		return rv;
   16200 
   16201 	/*
   16202 	 * Configure the K1 Si workaround during phy reset assuming there is
   16203 	 * link so that it disables K1 if link is in 1Gbps.
   16204 	 */
   16205 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16206 		return rv;
   16207 
   16208 	/* Workaround for link disconnects on a busy hub in half duplex */
   16209 	rv = sc->phy.acquire(sc);
   16210 	if (rv)
   16211 		return rv;
   16212 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16213 	if (rv)
   16214 		goto release;
   16215 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16216 	    phy_data & 0x00ff);
   16217 	if (rv)
   16218 		goto release;
   16219 
   16220 	/* Set MSE higher to enable link to stay up when noise is high */
   16221 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16222 release:
   16223 	sc->phy.release(sc);
   16224 
   16225 	return rv;
   16226 }
   16227 
   16228 /*
   16229  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16230  *  @sc:   pointer to the HW structure
   16231  */
   16232 static void
   16233 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16234 {
   16235 
   16236 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16237 		device_xname(sc->sc_dev), __func__));
   16238 
   16239 	if (sc->phy.acquire(sc) != 0)
   16240 		return;
   16241 
   16242 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16243 
   16244 	sc->phy.release(sc);
   16245 }
   16246 
   16247 static void
   16248 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16249 {
   16250 	device_t dev = sc->sc_dev;
   16251 	uint32_t mac_reg;
   16252 	uint16_t i, wuce;
   16253 	int count;
   16254 
   16255 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16256 		device_xname(dev), __func__));
   16257 
   16258 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16259 		return;
   16260 
   16261 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16262 	count = wm_rar_count(sc);
   16263 	for (i = 0; i < count; i++) {
   16264 		uint16_t lo, hi;
   16265 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16266 		lo = (uint16_t)(mac_reg & 0xffff);
   16267 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16268 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16269 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16270 
   16271 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16272 		lo = (uint16_t)(mac_reg & 0xffff);
   16273 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16274 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16275 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16276 	}
   16277 
   16278 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16279 }
   16280 
   16281 /*
   16282  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16283  *  with 82579 PHY
   16284  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16285  */
   16286 static int
   16287 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16288 {
   16289 	device_t dev = sc->sc_dev;
   16290 	int rar_count;
   16291 	int rv;
   16292 	uint32_t mac_reg;
   16293 	uint16_t dft_ctrl, data;
   16294 	uint16_t i;
   16295 
   16296 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16297 		device_xname(dev), __func__));
   16298 
   16299 	if (sc->sc_type < WM_T_PCH2)
   16300 		return 0;
   16301 
   16302 	/* Acquire PHY semaphore */
   16303 	rv = sc->phy.acquire(sc);
   16304 	if (rv != 0)
   16305 		return rv;
   16306 
   16307 	/* Disable Rx path while enabling/disabling workaround */
   16308 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16309 	if (rv != 0)
   16310 		goto out;
   16311 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16312 	    dft_ctrl | (1 << 14));
   16313 	if (rv != 0)
   16314 		goto out;
   16315 
   16316 	if (enable) {
   16317 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16318 		 * SHRAL/H) and initial CRC values to the MAC
   16319 		 */
   16320 		rar_count = wm_rar_count(sc);
   16321 		for (i = 0; i < rar_count; i++) {
   16322 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16323 			uint32_t addr_high, addr_low;
   16324 
   16325 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16326 			if (!(addr_high & RAL_AV))
   16327 				continue;
   16328 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16329 			mac_addr[0] = (addr_low & 0xFF);
   16330 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16331 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16332 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16333 			mac_addr[4] = (addr_high & 0xFF);
   16334 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16335 
   16336 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16337 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16338 		}
   16339 
   16340 		/* Write Rx addresses to the PHY */
   16341 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16342 	}
   16343 
   16344 	/*
   16345 	 * If enable ==
   16346 	 *	true: Enable jumbo frame workaround in the MAC.
   16347 	 *	false: Write MAC register values back to h/w defaults.
   16348 	 */
   16349 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16350 	if (enable) {
   16351 		mac_reg &= ~(1 << 14);
   16352 		mac_reg |= (7 << 15);
   16353 	} else
   16354 		mac_reg &= ~(0xf << 14);
   16355 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16356 
   16357 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16358 	if (enable) {
   16359 		mac_reg |= RCTL_SECRC;
   16360 		sc->sc_rctl |= RCTL_SECRC;
   16361 		sc->sc_flags |= WM_F_CRC_STRIP;
   16362 	} else {
   16363 		mac_reg &= ~RCTL_SECRC;
   16364 		sc->sc_rctl &= ~RCTL_SECRC;
   16365 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16366 	}
   16367 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16368 
   16369 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16370 	if (rv != 0)
   16371 		goto out;
   16372 	if (enable)
   16373 		data |= 1 << 0;
   16374 	else
   16375 		data &= ~(1 << 0);
   16376 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16377 	if (rv != 0)
   16378 		goto out;
   16379 
   16380 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16381 	if (rv != 0)
   16382 		goto out;
   16383 	/*
   16384 	 * XXX FreeBSD and Linux do the same thing that they set the same value
   16385 	 * on both the enable case and the disable case. Is it correct?
   16386 	 */
   16387 	data &= ~(0xf << 8);
   16388 	data |= (0xb << 8);
   16389 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16390 	if (rv != 0)
   16391 		goto out;
   16392 
   16393 	/*
   16394 	 * If enable ==
   16395 	 *	true: Enable jumbo frame workaround in the PHY.
   16396 	 *	false: Write PHY register values back to h/w defaults.
   16397 	 */
   16398 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16399 	if (rv != 0)
   16400 		goto out;
   16401 	data &= ~(0x7F << 5);
   16402 	if (enable)
   16403 		data |= (0x37 << 5);
   16404 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16405 	if (rv != 0)
   16406 		goto out;
   16407 
   16408 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16409 	if (rv != 0)
   16410 		goto out;
   16411 	if (enable)
   16412 		data &= ~(1 << 13);
   16413 	else
   16414 		data |= (1 << 13);
   16415 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16416 	if (rv != 0)
   16417 		goto out;
   16418 
   16419 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16420 	if (rv != 0)
   16421 		goto out;
   16422 	data &= ~(0x3FF << 2);
   16423 	if (enable)
   16424 		data |= (I82579_TX_PTR_GAP << 2);
   16425 	else
   16426 		data |= (0x8 << 2);
   16427 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16428 	if (rv != 0)
   16429 		goto out;
   16430 
   16431 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16432 	    enable ? 0xf100 : 0x7e00);
   16433 	if (rv != 0)
   16434 		goto out;
   16435 
   16436 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16437 	if (rv != 0)
   16438 		goto out;
   16439 	if (enable)
   16440 		data |= 1 << 10;
   16441 	else
   16442 		data &= ~(1 << 10);
   16443 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16444 	if (rv != 0)
   16445 		goto out;
   16446 
   16447 	/* Re-enable Rx path after enabling/disabling workaround */
   16448 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16449 	    dft_ctrl & ~(1 << 14));
   16450 
   16451 out:
   16452 	sc->phy.release(sc);
   16453 
   16454 	return rv;
   16455 }
   16456 
   16457 /*
   16458  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16459  *  done after every PHY reset.
   16460  */
   16461 static int
   16462 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16463 {
   16464 	device_t dev = sc->sc_dev;
   16465 	int rv;
   16466 
   16467 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16468 		device_xname(dev), __func__));
   16469 	KASSERT(sc->sc_type == WM_T_PCH2);
   16470 
   16471 	/* Set MDIO slow mode before any other MDIO access */
   16472 	rv = wm_set_mdio_slow_mode_hv(sc);
   16473 	if (rv != 0)
   16474 		return rv;
   16475 
   16476 	rv = sc->phy.acquire(sc);
   16477 	if (rv != 0)
   16478 		return rv;
   16479 	/* Set MSE higher to enable link to stay up when noise is high */
   16480 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16481 	if (rv != 0)
   16482 		goto release;
   16483 	/* Drop link after 5 times MSE threshold was reached */
   16484 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16485 release:
   16486 	sc->phy.release(sc);
   16487 
   16488 	return rv;
   16489 }
   16490 
   16491 /**
   16492  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16493  *  @link: link up bool flag
   16494  *
   16495  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16496  *  preventing further DMA write requests.  Workaround the issue by disabling
   16497  *  the de-assertion of the clock request when in 1Gpbs mode.
   16498  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16499  *  speeds in order to avoid Tx hangs.
   16500  **/
   16501 static int
   16502 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16503 {
   16504 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16505 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16506 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16507 	uint16_t phyreg;
   16508 
   16509 	if (link && (speed == STATUS_SPEED_1000)) {
   16510 		sc->phy.acquire(sc);
   16511 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16512 		    &phyreg);
   16513 		if (rv != 0)
   16514 			goto release;
   16515 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16516 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16517 		if (rv != 0)
   16518 			goto release;
   16519 		delay(20);
   16520 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16521 
   16522 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16523 		    &phyreg);
   16524 release:
   16525 		sc->phy.release(sc);
   16526 		return rv;
   16527 	}
   16528 
   16529 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16530 
   16531 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16532 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16533 	    || !link
   16534 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16535 		goto update_fextnvm6;
   16536 
   16537 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16538 
   16539 	/* Clear link status transmit timeout */
   16540 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16541 	if (speed == STATUS_SPEED_100) {
   16542 		/* Set inband Tx timeout to 5x10us for 100Half */
   16543 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16544 
   16545 		/* Do not extend the K1 entry latency for 100Half */
   16546 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16547 	} else {
   16548 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16549 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16550 
   16551 		/* Extend the K1 entry latency for 10 Mbps */
   16552 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16553 	}
   16554 
   16555 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16556 
   16557 update_fextnvm6:
   16558 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16559 	return 0;
   16560 }
   16561 
   16562 /*
   16563  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16564  *  @sc:   pointer to the HW structure
   16565  *  @link: link up bool flag
   16566  *
   16567  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16568  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   16569  *  If link is down, the function will restore the default K1 setting located
   16570  *  in the NVM.
   16571  */
   16572 static int
   16573 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16574 {
   16575 	int k1_enable = sc->sc_nvm_k1_enabled;
   16576 
   16577 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16578 		device_xname(sc->sc_dev), __func__));
   16579 
   16580 	if (sc->phy.acquire(sc) != 0)
   16581 		return -1;
   16582 
   16583 	if (link) {
   16584 		k1_enable = 0;
   16585 
   16586 		/* Link stall fix for link up */
   16587 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16588 		    0x0100);
   16589 	} else {
   16590 		/* Link stall fix for link down */
   16591 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16592 		    0x4100);
   16593 	}
   16594 
   16595 	wm_configure_k1_ich8lan(sc, k1_enable);
   16596 	sc->phy.release(sc);
   16597 
   16598 	return 0;
   16599 }
   16600 
   16601 /*
   16602  *  wm_k1_workaround_lv - K1 Si workaround
   16603  *  @sc:   pointer to the HW structure
   16604  *
   16605  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16606  *  Disable K1 for 1000 and 100 speeds
   16607  */
   16608 static int
   16609 wm_k1_workaround_lv(struct wm_softc *sc)
   16610 {
   16611 	uint32_t reg;
   16612 	uint16_t phyreg;
   16613 	int rv;
   16614 
   16615 	if (sc->sc_type != WM_T_PCH2)
   16616 		return 0;
   16617 
   16618 	/* Set K1 beacon duration based on 10Mbps speed */
   16619 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16620 	if (rv != 0)
   16621 		return rv;
   16622 
   16623 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16624 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16625 		if (phyreg &
   16626 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16627 			/* LV 1G/100 Packet drop issue wa  */
   16628 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16629 			    &phyreg);
   16630 			if (rv != 0)
   16631 				return rv;
   16632 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16633 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16634 			    phyreg);
   16635 			if (rv != 0)
   16636 				return rv;
   16637 		} else {
   16638 			/* For 10Mbps */
   16639 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16640 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16641 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16642 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16643 		}
   16644 	}
   16645 
   16646 	return 0;
   16647 }
   16648 
   16649 /*
   16650  *  wm_link_stall_workaround_hv - Si workaround
   16651  *  @sc: pointer to the HW structure
   16652  *
   16653  *  This function works around a Si bug where the link partner can get
   16654  *  a link up indication before the PHY does. If small packets are sent
   16655  *  by the link partner they can be placed in the packet buffer without
   16656  *  being properly accounted for by the PHY and will stall preventing
   16657  *  further packets from being received.  The workaround is to clear the
   16658  *  packet buffer after the PHY detects link up.
   16659  */
   16660 static int
   16661 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16662 {
   16663 	uint16_t phyreg;
   16664 
   16665 	if (sc->sc_phytype != WMPHY_82578)
   16666 		return 0;
   16667 
   16668 	/* Do not apply workaround if in PHY loopback bit 14 set */
   16669 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16670 	if ((phyreg & BMCR_LOOP) != 0)
   16671 		return 0;
   16672 
   16673 	/* Check if link is up and at 1Gbps */
   16674 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16675 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16676 	    | BM_CS_STATUS_SPEED_MASK;
   16677 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16678 		| BM_CS_STATUS_SPEED_1000))
   16679 		return 0;
   16680 
   16681 	delay(200 * 1000);	/* XXX too big */
   16682 
   16683 	/* Flush the packets in the fifo buffer */
   16684 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16685 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16686 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16687 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16688 
   16689 	return 0;
   16690 }
   16691 
   16692 static int
   16693 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16694 {
   16695 	int rv;
   16696 	uint16_t reg;
   16697 
   16698 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16699 	if (rv != 0)
   16700 		return rv;
   16701 
   16702 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16703 	    reg | HV_KMRN_MDIO_SLOW);
   16704 }
   16705 
   16706 /*
   16707  *  wm_configure_k1_ich8lan - Configure K1 power state
   16708  *  @sc: pointer to the HW structure
   16709  *  @enable: K1 state to configure
   16710  *
   16711  *  Configure the K1 power state based on the provided parameter.
   16712  *  Assumes semaphore already acquired.
   16713  */
   16714 static void
   16715 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16716 {
   16717 	uint32_t ctrl, ctrl_ext, tmp;
   16718 	uint16_t kmreg;
   16719 	int rv;
   16720 
   16721 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16722 
   16723 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16724 	if (rv != 0)
   16725 		return;
   16726 
   16727 	if (k1_enable)
   16728 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16729 	else
   16730 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16731 
   16732 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16733 	if (rv != 0)
   16734 		return;
   16735 
   16736 	delay(20);
   16737 
   16738 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16739 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16740 
   16741 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16742 	tmp |= CTRL_FRCSPD;
   16743 
   16744 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16745 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16746 	CSR_WRITE_FLUSH(sc);
   16747 	delay(20);
   16748 
   16749 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16750 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16751 	CSR_WRITE_FLUSH(sc);
   16752 	delay(20);
   16753 
   16754 	return;
   16755 }
   16756 
   16757 /* special case - for 82575 - need to do manual init ... */
   16758 static void
   16759 wm_reset_init_script_82575(struct wm_softc *sc)
   16760 {
   16761 	/*
   16762 	 * Remark: this is untested code - we have no board without EEPROM
   16763 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   16764 	 */
   16765 
   16766 	/* SerDes configuration via SERDESCTRL */
   16767 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16768 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16769 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16770 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16771 
   16772 	/* CCM configuration via CCMCTL register */
   16773 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16774 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16775 
   16776 	/* PCIe lanes configuration */
   16777 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16778 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16779 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16780 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16781 
   16782 	/* PCIe PLL Configuration */
   16783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16785 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16786 }
   16787 
   16788 static void
   16789 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16790 {
   16791 	uint32_t reg;
   16792 	uint16_t nvmword;
   16793 	int rv;
   16794 
   16795 	if (sc->sc_type != WM_T_82580)
   16796 		return;
   16797 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16798 		return;
   16799 
   16800 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16801 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16802 	if (rv != 0) {
   16803 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16804 		    __func__);
   16805 		return;
   16806 	}
   16807 
   16808 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16809 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16810 		reg |= MDICNFG_DEST;
   16811 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16812 		reg |= MDICNFG_COM_MDIO;
   16813 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16814 }
   16815 
   16816 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16817 
   16818 static bool
   16819 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16820 {
   16821 	uint32_t reg;
   16822 	uint16_t id1, id2;
   16823 	int i, rv;
   16824 
   16825 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16826 		device_xname(sc->sc_dev), __func__));
   16827 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16828 
   16829 	id1 = id2 = 0xffff;
   16830 	for (i = 0; i < 2; i++) {
   16831 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16832 		    &id1);
   16833 		if ((rv != 0) || MII_INVALIDID(id1))
   16834 			continue;
   16835 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16836 		    &id2);
   16837 		if ((rv != 0) || MII_INVALIDID(id2))
   16838 			continue;
   16839 		break;
   16840 	}
   16841 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16842 		goto out;
   16843 
   16844 	/*
   16845 	 * In case the PHY needs to be in mdio slow mode,
   16846 	 * set slow mode and try to get the PHY id again.
   16847 	 */
   16848 	rv = 0;
   16849 	if (sc->sc_type < WM_T_PCH_LPT) {
   16850 		sc->phy.release(sc);
   16851 		wm_set_mdio_slow_mode_hv(sc);
   16852 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16853 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16854 		sc->phy.acquire(sc);
   16855 	}
   16856 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16857 		device_printf(sc->sc_dev, "XXX return with false\n");
   16858 		return false;
   16859 	}
   16860 out:
   16861 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16862 		/* Only unforce SMBus if ME is not active */
   16863 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16864 			uint16_t phyreg;
   16865 
   16866 			/* Unforce SMBus mode in PHY */
   16867 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16868 			    CV_SMB_CTRL, &phyreg);
   16869 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16870 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16871 			    CV_SMB_CTRL, phyreg);
   16872 
   16873 			/* Unforce SMBus mode in MAC */
   16874 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16875 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16876 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16877 		}
   16878 	}
   16879 	return true;
   16880 }
   16881 
   16882 static void
   16883 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16884 {
   16885 	uint32_t reg;
   16886 	int i;
   16887 
   16888 	/* Set PHY Config Counter to 50msec */
   16889 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16890 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16891 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16892 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16893 
   16894 	/* Toggle LANPHYPC */
   16895 	reg = CSR_READ(sc, WMREG_CTRL);
   16896 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16897 	reg &= ~CTRL_LANPHYPC_VALUE;
   16898 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16899 	CSR_WRITE_FLUSH(sc);
   16900 	delay(1000);
   16901 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16902 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16903 	CSR_WRITE_FLUSH(sc);
   16904 
   16905 	if (sc->sc_type < WM_T_PCH_LPT)
   16906 		delay(50 * 1000);
   16907 	else {
   16908 		i = 20;
   16909 
   16910 		do {
   16911 			delay(5 * 1000);
   16912 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16913 		    && i--);
   16914 
   16915 		delay(30 * 1000);
   16916 	}
   16917 }
   16918 
   16919 static int
   16920 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16921 {
   16922 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16923 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16924 	uint32_t rxa;
   16925 	uint16_t scale = 0, lat_enc = 0;
   16926 	int32_t obff_hwm = 0;
   16927 	int64_t lat_ns, value;
   16928 
   16929 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16930 		device_xname(sc->sc_dev), __func__));
   16931 
   16932 	if (link) {
   16933 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16934 		uint32_t status;
   16935 		uint16_t speed;
   16936 		pcireg_t preg;
   16937 
   16938 		status = CSR_READ(sc, WMREG_STATUS);
   16939 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16940 		case STATUS_SPEED_10:
   16941 			speed = 10;
   16942 			break;
   16943 		case STATUS_SPEED_100:
   16944 			speed = 100;
   16945 			break;
   16946 		case STATUS_SPEED_1000:
   16947 			speed = 1000;
   16948 			break;
   16949 		default:
   16950 			device_printf(sc->sc_dev, "Unknown speed "
   16951 			    "(status = %08x)\n", status);
   16952 			return -1;
   16953 		}
   16954 
   16955 		/* Rx Packet Buffer Allocation size (KB) */
   16956 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16957 
   16958 		/*
   16959 		 * Determine the maximum latency tolerated by the device.
   16960 		 *
   16961 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16962 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16963 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16964 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16965 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16966 		 */
   16967 		lat_ns = ((int64_t)rxa * 1024 -
   16968 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16969 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16970 		if (lat_ns < 0)
   16971 			lat_ns = 0;
   16972 		else
   16973 			lat_ns /= speed;
   16974 		value = lat_ns;
   16975 
   16976 		while (value > LTRV_VALUE) {
   16977 			scale ++;
   16978 			value = howmany(value, __BIT(5));
   16979 		}
   16980 		if (scale > LTRV_SCALE_MAX) {
   16981 			device_printf(sc->sc_dev,
   16982 			    "Invalid LTR latency scale %d\n", scale);
   16983 			return -1;
   16984 		}
   16985 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16986 
   16987 		/* Determine the maximum latency tolerated by the platform */
   16988 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16989 		    WM_PCI_LTR_CAP_LPT);
   16990 		max_snoop = preg & 0xffff;
   16991 		max_nosnoop = preg >> 16;
   16992 
   16993 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16994 
   16995 		if (lat_enc > max_ltr_enc) {
   16996 			lat_enc = max_ltr_enc;
   16997 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16998 			    * PCI_LTR_SCALETONS(
   16999 				    __SHIFTOUT(lat_enc,
   17000 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17001 		}
   17002 
   17003 		if (lat_ns) {
   17004 			lat_ns *= speed * 1000;
   17005 			lat_ns /= 8;
   17006 			lat_ns /= 1000000000;
   17007 			obff_hwm = (int32_t)(rxa - lat_ns);
   17008 		}
   17009 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17010 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   17011 			    "(rxa = %d, lat_ns = %d)\n",
   17012 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17013 			return -1;
   17014 		}
   17015 	}
   17016 	/* Snoop and No-Snoop latencies the same */
   17017 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17018 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17019 
   17020 	/* Set OBFF high water mark */
   17021 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17022 	reg |= obff_hwm;
   17023 	CSR_WRITE(sc, WMREG_SVT, reg);
   17024 
   17025 	/* Enable OBFF */
   17026 	reg = CSR_READ(sc, WMREG_SVCR);
   17027 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17028 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17029 
   17030 	return 0;
   17031 }
   17032 
   17033 /*
   17034  * I210 Errata 25 and I211 Errata 10
   17035  * Slow System Clock.
   17036  *
   17037  * Note that this function is called on both FLASH and iNVM case on NetBSD.
   17038  */
   17039 static int
   17040 wm_pll_workaround_i210(struct wm_softc *sc)
   17041 {
   17042 	uint32_t mdicnfg, wuc;
   17043 	uint32_t reg;
   17044 	pcireg_t pcireg;
   17045 	uint32_t pmreg;
   17046 	uint16_t nvmword, tmp_nvmword;
   17047 	uint16_t phyval;
   17048 	bool wa_done = false;
   17049 	int i, rv = 0;
   17050 
   17051 	/* Get Power Management cap offset */
   17052 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17053 	    &pmreg, NULL) == 0)
   17054 		return -1;
   17055 
   17056 	/* Save WUC and MDICNFG registers */
   17057 	wuc = CSR_READ(sc, WMREG_WUC);
   17058 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17059 
   17060 	reg = mdicnfg & ~MDICNFG_DEST;
   17061 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17062 
   17063 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17064 		/*
   17065 		 * The default value of the Initialization Control Word 1
   17066 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17067 		 */
   17068 		nvmword = INVM_DEFAULT_AL;
   17069 	}
   17070 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17071 
   17072 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17073 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17074 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17075 
   17076 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17077 			rv = 0;
   17078 			break; /* OK */
   17079 		} else
   17080 			rv = -1;
   17081 
   17082 		wa_done = true;
   17083 		/* Directly reset the internal PHY */
   17084 		reg = CSR_READ(sc, WMREG_CTRL);
   17085 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17086 
   17087 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17088 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17089 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17090 
   17091 		CSR_WRITE(sc, WMREG_WUC, 0);
   17092 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17093 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17094 
   17095 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17096 		    pmreg + PCI_PMCSR);
   17097 		pcireg |= PCI_PMCSR_STATE_D3;
   17098 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17099 		    pmreg + PCI_PMCSR, pcireg);
   17100 		delay(1000);
   17101 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17102 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17103 		    pmreg + PCI_PMCSR, pcireg);
   17104 
   17105 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17106 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17107 
   17108 		/* Restore WUC register */
   17109 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17110 	}
   17111 
   17112 	/* Restore MDICNFG setting */
   17113 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17114 	if (wa_done)
   17115 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17116 	return rv;
   17117 }
   17118 
   17119 static void
   17120 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17121 {
   17122 	uint32_t reg;
   17123 
   17124 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17125 		device_xname(sc->sc_dev), __func__));
   17126 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17127 	    || (sc->sc_type == WM_T_PCH_CNP));
   17128 
   17129 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17130 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17131 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17132 
   17133 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17134 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17135 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17136 }
   17137 
   17138 /* Sysctl functions */
   17139 static int
   17140 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17141 {
   17142 	struct sysctlnode node = *rnode;
   17143 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17144 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17145 	struct wm_softc *sc = txq->txq_sc;
   17146 	uint32_t reg;
   17147 
   17148 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17149 	node.sysctl_data = &reg;
   17150 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17151 }
   17152 
   17153 static int
   17154 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17155 {
   17156 	struct sysctlnode node = *rnode;
   17157 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17158 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17159 	struct wm_softc *sc = txq->txq_sc;
   17160 	uint32_t reg;
   17161 
   17162 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17163 	node.sysctl_data = &reg;
   17164 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17165 }
   17166 
   17167 #ifdef WM_DEBUG
   17168 static int
   17169 wm_sysctl_debug(SYSCTLFN_ARGS)
   17170 {
   17171 	struct sysctlnode node = *rnode;
   17172 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17173 	uint32_t dflags;
   17174 	int error;
   17175 
   17176 	dflags = sc->sc_debug;
   17177 	node.sysctl_data = &dflags;
   17178 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17179 
   17180 	if (error || newp == NULL)
   17181 		return error;
   17182 
   17183 	sc->sc_debug = dflags;
   17184 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17185 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17186 
   17187 	return 0;
   17188 }
   17189 #endif
   17190