/*	$NetBSD: if_wm.c,v 1.710 2021/10/20 02:05:15 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.710 2021/10/20 02:05:15 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
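
/*
 * Usage note: the second DPRINTF() argument selects debug classes and
 * the third is a fully parenthesized printf(9) argument list, e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK, ("%s: link up\n",
 *		device_xname(sc->sc_dev)));
 */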

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * Maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it (a fallback sketched
 * below, after the process-limit defaults).
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
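
#if 0
/*
 * Illustrative sketch only (hypothetical helper, not used by the
 * driver): ring and job-queue sizes are powers of two, so the
 * WM_NEXTTX()/WM_NEXTTXS() macros above (and WM_NEXTRX() below) can
 * wrap an index with a mask instead of a modulo.
 */
static inline int
wm_next_ring_idx(int idx, int ndesc)
{

	/* ndesc must be a power of two; then & replaces %. */
	return (idx + 1) & (ndesc - 1);
}
#endif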

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
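
#if 0
/*
 * Sketch (assumed shape only; the real Tx path differs in detail) of
 * the m_defrag() fallback described above: when bus_dmamap_load_mbuf(9)
 * fails with EFBIG (more segments than the map allows), compact the
 * chain and retry once.  "sc", "dmamap", "m0" and "error" are
 * hypothetical locals.
 */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m = m_defrag(m0, M_NOWAIT);
		if (m != NULL)
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m,
			    BUS_DMA_NOWAIT);
	}
#endif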

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
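
/*
 * For reference, how the macros above expand for a Tx queue counter
 * (hypothetical queue number 0, event "txdw"):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *		char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *		struct evcnt txq_ev_txdw;
 *	(## is not expanded inside a string literal, so every name buffer
 *	has that one fixed size, large enough for names like "txq00txdw".)
 *	WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, ...) then snprintf()s
 *	"txq00txdw" into the buffer and hands it to evcnt_attach_dynamic(9).
 */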

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq intermediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};
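
/*
 * Usage sketch (assumed pattern): callers bracket PHY access with the
 * chip-specific acquire/release ops, e.g.
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 */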

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
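
/*
 * rxq_tailp always points at the m_next slot where the next fragment
 * will be linked, so WM_RXCHAIN_LINK() appends to the chain in O(1)
 * without walking it.
 */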

#ifdef WM_EVENT_COUNTERS
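/*
 * Event counters may be bumped from several CPUs without a lock.
 * Where 64-bit atomic load/store is available, the relaxed atomics
 * below keep a concurrent reader from seeing a torn 64-bit count,
 * though two racing increments may still lose one update.
 */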
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
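
/*
 * CSR_WRITE_FLUSH() is the usual MMIO "write flush" idiom: reading
 * back a device register (STATUS here) forces previously posted PCI
 * writes out to the chip before the driver proceeds.
 */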

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
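
/*
 * The controller's descriptor base address registers are split into
 * 32-bit halves (e.g. TDBAL/TDBAH); when bus_addr_t is 32 bits the
 * _HI macros above evaluate to 0 and avoid an undefined 32-bit shift.
 */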

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1340 	  "Intel i82801H LAN Controller",
   1341 	  WM_T_ICH8,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1343 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1344 	  WM_T_ICH8,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1346 	  "Intel i82801H (M) LAN Controller",
   1347 	  WM_T_ICH8,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1349 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH8,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1352 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1353 	  WM_T_ICH8,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1355 	  "82567V-3 LAN Controller",
   1356 	  WM_T_ICH8,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1358 	  "82801I (AMT) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1361 	  "82801I 10/100 LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1364 	  "82801I (G) 10/100 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1367 	  "82801I (GT) 10/100 LAN Controller",
   1368 	  WM_T_ICH9,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1370 	  "82801I (C) LAN Controller",
   1371 	  WM_T_ICH9,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1373 	  "82801I mobile LAN Controller",
   1374 	  WM_T_ICH9,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1376 	  "82801I mobile (V) LAN Controller",
   1377 	  WM_T_ICH9,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1379 	  "82801I mobile (AMT) LAN Controller",
   1380 	  WM_T_ICH9,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1382 	  "82567LM-4 LAN Controller",
   1383 	  WM_T_ICH9,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1385 	  "82567LM-2 LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1388 	  "82567LF-2 LAN Controller",
   1389 	  WM_T_ICH10,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1391 	  "82567LM-3 LAN Controller",
   1392 	  WM_T_ICH10,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1394 	  "82567LF-3 LAN Controller",
   1395 	  WM_T_ICH10,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1397 	  "82567V-2 LAN Controller",
   1398 	  WM_T_ICH10,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1400 	  "82567V-3? LAN Controller",
   1401 	  WM_T_ICH10,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1403 	  "HANKSVILLE LAN Controller",
   1404 	  WM_T_ICH10,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1406 	  "PCH LAN (82577LM) Controller",
   1407 	  WM_T_PCH,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1409 	  "PCH LAN (82577LC) Controller",
   1410 	  WM_T_PCH,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1412 	  "PCH LAN (82578DM) Controller",
   1413 	  WM_T_PCH,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1415 	  "PCH LAN (82578DC) Controller",
   1416 	  WM_T_PCH,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1418 	  "PCH2 LAN (82579LM) Controller",
   1419 	  WM_T_PCH2,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1421 	  "PCH2 LAN (82579V) Controller",
   1422 	  WM_T_PCH2,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1424 	  "82575EB dual-1000baseT Ethernet",
   1425 	  WM_T_82575,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1427 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1428 	  WM_T_82575,		WMP_F_SERDES },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1430 	  "82575GB quad-1000baseT Ethernet",
   1431 	  WM_T_82575,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1433 	  "82575GB quad-1000baseT Ethernet (PM)",
   1434 	  WM_T_82575,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1436 	  "82576 1000BaseT Ethernet",
   1437 	  WM_T_82576,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1439 	  "82576 1000BaseX Ethernet",
   1440 	  WM_T_82576,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1443 	  "82576 gigabit Ethernet (SERDES)",
   1444 	  WM_T_82576,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1447 	  "82576 quad-1000BaseT Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1451 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1452 	  WM_T_82576,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1455 	  "82576 gigabit Ethernet",
   1456 	  WM_T_82576,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1459 	  "82576 gigabit Ethernet (SERDES)",
   1460 	  WM_T_82576,		WMP_F_SERDES },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1462 	  "82576 quad-gigabit Ethernet (SERDES)",
   1463 	  WM_T_82576,		WMP_F_SERDES },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1466 	  "82580 1000BaseT Ethernet",
   1467 	  WM_T_82580,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1469 	  "82580 1000BaseX Ethernet",
   1470 	  WM_T_82580,		WMP_F_FIBER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1473 	  "82580 1000BaseT Ethernet (SERDES)",
   1474 	  WM_T_82580,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1477 	  "82580 gigabit Ethernet (SGMII)",
   1478 	  WM_T_82580,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1480 	  "82580 dual-1000BaseT Ethernet",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1484 	  "82580 quad-1000BaseX Ethernet",
   1485 	  WM_T_82580,		WMP_F_FIBER },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1488 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1489 	  WM_T_82580,		WMP_F_COPPER },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1492 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1496 	  "DH89XXCC 1000BASE-KX Ethernet",
   1497 	  WM_T_82580,		WMP_F_SERDES },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1500 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1501 	  WM_T_82580,		WMP_F_SERDES },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1504 	  "I350 Gigabit Network Connection",
   1505 	  WM_T_I350,		WMP_F_COPPER },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1508 	  "I350 Gigabit Fiber Network Connection",
   1509 	  WM_T_I350,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1512 	  "I350 Gigabit Backplane Connection",
   1513 	  WM_T_I350,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1516 	  "I350 Quad Port Gigabit Ethernet",
   1517 	  WM_T_I350,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1520 	  "I350 Gigabit Connection",
   1521 	  WM_T_I350,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1524 	  "I354 Gigabit Ethernet (KX)",
   1525 	  WM_T_I354,		WMP_F_SERDES },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1528 	  "I354 Gigabit Ethernet (SGMII)",
   1529 	  WM_T_I354,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1532 	  "I354 Gigabit Ethernet (2.5G)",
   1533 	  WM_T_I354,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1536 	  "I210-T1 Ethernet Server Adapter",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1540 	  "I210 Ethernet (Copper OEM)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1544 	  "I210 Ethernet (Copper IT)",
   1545 	  WM_T_I210,		WMP_F_COPPER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1548 	  "I210 Ethernet (Copper, FLASH less)",
   1549 	  WM_T_I210,		WMP_F_COPPER },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1552 	  "I210 Gigabit Ethernet (Fiber)",
   1553 	  WM_T_I210,		WMP_F_FIBER },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1556 	  "I210 Gigabit Ethernet (SERDES)",
   1557 	  WM_T_I210,		WMP_F_SERDES },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1560 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_SERDES },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1564 	  "I210 Gigabit Ethernet (SGMII)",
   1565 	  WM_T_I210,		WMP_F_COPPER },
   1566 
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1568 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1569 	  WM_T_I210,		WMP_F_COPPER },
   1570 
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1572 	  "I211 Ethernet (COPPER)",
   1573 	  WM_T_I211,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1575 	  "I217 V Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1578 	  "I217 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1581 	  "I218 V Ethernet Connection",
   1582 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1584 	  "I218 V Ethernet Connection",
   1585 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1587 	  "I218 V Ethernet Connection",
   1588 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1590 	  "I218 LM Ethernet Connection",
   1591 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1593 	  "I218 LM Ethernet Connection",
   1594 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1596 	  "I218 LM Ethernet Connection",
   1597 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1602 	  "I219 LM (2) Ethernet Connection",
   1603 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1605 	  "I219 LM (3) Ethernet Connection",
   1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1608 	  "I219 LM (4) Ethernet Connection",
   1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1611 	  "I219 LM (5) Ethernet Connection",
   1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1614 	  "I219 LM (6) Ethernet Connection",
   1615 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1617 	  "I219 LM (7) Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1620 	  "I219 LM (8) Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1623 	  "I219 LM (9) Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1626 	  "I219 LM (10) Ethernet Connection",
   1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1629 	  "I219 LM (11) Ethernet Connection",
   1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1632 	  "I219 LM (12) Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1635 	  "I219 LM (13) Ethernet Connection",
   1636 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1638 	  "I219 LM (14) Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1641 	  "I219 LM (15) Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1644 	  "I219 LM (16) Ethernet Connection",
   1645 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1647 	  "I219 LM (17) Ethernet Connection",
   1648 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1650 	  "I219 LM (18) Ethernet Connection",
   1651 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1653 	  "I219 LM (19) Ethernet Connection",
   1654 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1659 	  "I219 V (2) Ethernet Connection",
   1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1662 	  "I219 V (4) Ethernet Connection",
   1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1665 	  "I219 V (5) Ethernet Connection",
   1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1668 	  "I219 V (6) Ethernet Connection",
   1669 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1671 	  "I219 V (7) Ethernet Connection",
   1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1674 	  "I219 V (8) Ethernet Connection",
   1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1677 	  "I219 V (9) Ethernet Connection",
   1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1680 	  "I219 V (10) Ethernet Connection",
   1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1683 	  "I219 V (11) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1686 	  "I219 V (12) Ethernet Connection",
   1687 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1689 	  "I219 V (13) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1692 	  "I219 V (14) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1695 	  "I219 V (15) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1698 	  "I219 V (16) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1701 	  "I219 V (17) Ethernet Connection",
   1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1704 	  "I219 V (18) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1707 	  "I219 V (19) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ 0,			0,
   1710 	  NULL,
   1711 	  0,			0 },
   1712 };
   1713 
   1714 /*
   1715  * Register read/write functions.
   1716  * Other than CSR_{READ|WRITE}().
   1717  */
   1718 
   1719 #if 0 /* Not currently used */
   1720 static inline uint32_t
   1721 wm_io_read(struct wm_softc *sc, int reg)
   1722 {
   1723 
   1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1725 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1726 }
   1727 #endif
   1728 
   1729 static inline void
   1730 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1731 {
   1732 
   1733 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1734 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1735 }
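
         /*
          * Usage sketch (illustrative only): the I/O BAR implements an
          * address/data pair, where offset 0 selects the CSR and offset 4
          * carries the data. Once the BAR is mapped (WM_F_IOH_VALID), a CSR
          * such as WMREG_STATUS can also be reached through I/O space:
          *
          *	val = wm_io_read(sc, WMREG_STATUS); (with wm_io_read enabled)
          *	wm_io_write(sc, WMREG_STATUS, val);
          */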
   1736 
   1737 static inline void
   1738 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1739     uint32_t data)
   1740 {
   1741 	uint32_t regval;
   1742 	int i;
   1743 
   1744 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1745 
   1746 	CSR_WRITE(sc, reg, regval);
   1747 
   1748 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1749 		delay(5);
   1750 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1751 			break;
   1752 	}
   1753 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1754 		aprint_error("%s: WARNING:"
   1755 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1756 		    device_xname(sc->sc_dev), reg);
   1757 	}
   1758 }
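
         /*
          * Hypothetical call sketch (the values are for illustration only):
          * writing the byte 0x06 to offset 0x04 of the 8-bit register file
          * behind a control register such as WMREG_SCTL would be
          *
          *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x04, 0x06);
          *
          * after which the loop above polls SCTL_CTL_READY every 5us.
          */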
   1759 
   1760 static inline void
   1761 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1762 {
   1763 	wa->wa_low = htole32(v & 0xffffffffU);
   1764 	if (sizeof(bus_addr_t) == 8)
   1765 		wa->wa_high = htole32((uint64_t) v >> 32);
   1766 	else
   1767 		wa->wa_high = 0;
   1768 }
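
         /*
          * Worked example: with a 64-bit bus_addr_t and v = 0x123456789,
          * this stores wa_low = htole32(0x23456789) and wa_high =
          * htole32(0x1); with a 32-bit bus_addr_t, wa_high is simply zero.
          */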
   1769 
   1770 /*
   1771  * Descriptor sync/init functions.
   1772  */
   1773 static inline void
   1774 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1775 {
   1776 	struct wm_softc *sc = txq->txq_sc;
   1777 
   1778 	/* If it will wrap around, sync to the end of the ring. */
   1779 	if ((start + num) > WM_NTXDESC(txq)) {
   1780 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1781 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1782 		    (WM_NTXDESC(txq) - start), ops);
   1783 		num -= (WM_NTXDESC(txq) - start);
   1784 		start = 0;
   1785 	}
   1786 
   1787 	/* Now sync whatever is left. */
   1788 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1789 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1790 }
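
         /*
          * Wrap-around example: on a 256-descriptor ring, wm_cdtxsync(txq,
          * 250, 10, ops) syncs descriptors 250-255 first and then 0-3, so
          * callers never have to split a range that crosses the ring end.
          */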
   1791 
   1792 static inline void
   1793 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1794 {
   1795 	struct wm_softc *sc = rxq->rxq_sc;
   1796 
   1797 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1798 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1799 }
   1800 
   1801 static inline void
   1802 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1803 {
   1804 	struct wm_softc *sc = rxq->rxq_sc;
   1805 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1806 	struct mbuf *m = rxs->rxs_mbuf;
   1807 
   1808 	/*
   1809 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1810 	 * so that the payload after the Ethernet header is aligned
   1811 	 * to a 4-byte boundary.
    1812 	 *
   1813 	 * XXX BRAINDAMAGE ALERT!
   1814 	 * The stupid chip uses the same size for every buffer, which
   1815 	 * is set in the Receive Control register.  We are using the 2K
   1816 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1817 	 * reason, we can't "scoot" packets longer than the standard
   1818 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1819 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1820 	 * the upper layer copy the headers.
   1821 	 */
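         	/*
         	 * (With the 2-byte tweak, the 14-byte Ethernet header ends
         	 * at offset 16, so the IP header that follows is aligned to
         	 * a 4-byte boundary.)
         	 */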
   1822 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1823 
   1824 	if (sc->sc_type == WM_T_82574) {
   1825 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1826 		rxd->erx_data.erxd_addr =
   1827 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1828 		rxd->erx_data.erxd_dd = 0;
   1829 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1830 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1831 
   1832 		rxd->nqrx_data.nrxd_paddr =
   1833 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1834 		/* Currently, split header is not supported. */
   1835 		rxd->nqrx_data.nrxd_haddr = 0;
   1836 	} else {
   1837 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1838 
   1839 		wm_set_dma_addr(&rxd->wrx_addr,
   1840 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1841 		rxd->wrx_len = 0;
   1842 		rxd->wrx_cksum = 0;
   1843 		rxd->wrx_status = 0;
   1844 		rxd->wrx_errors = 0;
   1845 		rxd->wrx_special = 0;
   1846 	}
   1847 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1848 
   1849 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1850 }
   1851 
   1852 /*
   1853  * Device driver interface functions and commonly used functions.
   1854  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1855  */
   1856 
   1857 /* Lookup supported device table */
   1858 static const struct wm_product *
   1859 wm_lookup(const struct pci_attach_args *pa)
   1860 {
   1861 	const struct wm_product *wmp;
   1862 
   1863 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1864 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1865 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1866 			return wmp;
   1867 	}
   1868 	return NULL;
   1869 }
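
         /*
          * Note: the scan relies on the all-zero sentinel entry at the end
          * of wm_products (wmp_name == NULL), so the table needs no element
          * count.
          */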
   1870 
   1871 /* The match function (ca_match) */
   1872 static int
   1873 wm_match(device_t parent, cfdata_t cf, void *aux)
   1874 {
   1875 	struct pci_attach_args *pa = aux;
   1876 
   1877 	if (wm_lookup(pa) != NULL)
   1878 		return 1;
   1879 
   1880 	return 0;
   1881 }
   1882 
   1883 /* The attach function (ca_attach) */
   1884 static void
   1885 wm_attach(device_t parent, device_t self, void *aux)
   1886 {
   1887 	struct wm_softc *sc = device_private(self);
   1888 	struct pci_attach_args *pa = aux;
   1889 	prop_dictionary_t dict;
   1890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1891 	pci_chipset_tag_t pc = pa->pa_pc;
   1892 	int counts[PCI_INTR_TYPE_SIZE];
   1893 	pci_intr_type_t max_type;
   1894 	const char *eetype, *xname;
   1895 	bus_space_tag_t memt;
   1896 	bus_space_handle_t memh;
   1897 	bus_size_t memsize;
   1898 	int memh_valid;
   1899 	int i, error;
   1900 	const struct wm_product *wmp;
   1901 	prop_data_t ea;
   1902 	prop_number_t pn;
   1903 	uint8_t enaddr[ETHER_ADDR_LEN];
   1904 	char buf[256];
   1905 	char wqname[MAXCOMLEN];
   1906 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1907 	pcireg_t preg, memtype;
   1908 	uint16_t eeprom_data, apme_mask;
   1909 	bool force_clear_smbi;
   1910 	uint32_t link_mode;
   1911 	uint32_t reg;
   1912 
   1913 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1914 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1915 #endif
   1916 	sc->sc_dev = self;
   1917 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1918 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1919 	sc->sc_core_stopping = false;
   1920 
   1921 	wmp = wm_lookup(pa);
   1922 #ifdef DIAGNOSTIC
   1923 	if (wmp == NULL) {
   1924 		printf("\n");
   1925 		panic("wm_attach: impossible");
   1926 	}
   1927 #endif
   1928 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1929 
   1930 	sc->sc_pc = pa->pa_pc;
   1931 	sc->sc_pcitag = pa->pa_tag;
   1932 
   1933 	if (pci_dma64_available(pa))
   1934 		sc->sc_dmat = pa->pa_dmat64;
   1935 	else
   1936 		sc->sc_dmat = pa->pa_dmat;
   1937 
   1938 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1939 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1940 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1941 
   1942 	sc->sc_type = wmp->wmp_type;
   1943 
   1944 	/* Set default function pointers */
   1945 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1946 	sc->phy.release = sc->nvm.release = wm_put_null;
   1947 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1948 
   1949 	if (sc->sc_type < WM_T_82543) {
   1950 		if (sc->sc_rev < 2) {
   1951 			aprint_error_dev(sc->sc_dev,
   1952 			    "i82542 must be at least rev. 2\n");
   1953 			return;
   1954 		}
   1955 		if (sc->sc_rev < 3)
   1956 			sc->sc_type = WM_T_82542_2_0;
   1957 	}
   1958 
   1959 	/*
   1960 	 * Disable MSI for Errata:
   1961 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1962 	 *
   1963 	 *  82544: Errata 25
   1964 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1965 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1966 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1967 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1968 	 *
   1969 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1970 	 *
   1971 	 *  82571 & 82572: Errata 63
   1972 	 */
   1973 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1974 	    || (sc->sc_type == WM_T_82572))
   1975 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1976 
   1977 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1978 	    || (sc->sc_type == WM_T_82580)
   1979 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1980 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1981 		sc->sc_flags |= WM_F_NEWQUEUE;
   1982 
   1983 	/* Set device properties (mactype) */
   1984 	dict = device_properties(sc->sc_dev);
   1985 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1986 
   1987 	/*
    1988 	 * Map the device.  All devices support memory-mapped access,
   1989 	 * and it is really required for normal operation.
   1990 	 */
   1991 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1992 	switch (memtype) {
   1993 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1994 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1995 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1996 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1997 		break;
   1998 	default:
   1999 		memh_valid = 0;
   2000 		break;
   2001 	}
   2002 
   2003 	if (memh_valid) {
   2004 		sc->sc_st = memt;
   2005 		sc->sc_sh = memh;
   2006 		sc->sc_ss = memsize;
   2007 	} else {
   2008 		aprint_error_dev(sc->sc_dev,
   2009 		    "unable to map device registers\n");
   2010 		return;
   2011 	}
   2012 
   2013 	/*
   2014 	 * In addition, i82544 and later support I/O mapped indirect
   2015 	 * register access.  It is not desirable (nor supported in
   2016 	 * this driver) to use it for normal operation, though it is
   2017 	 * required to work around bugs in some chip versions.
   2018 	 */
   2019 	switch (sc->sc_type) {
   2020 	case WM_T_82544:
   2021 	case WM_T_82541:
   2022 	case WM_T_82541_2:
   2023 	case WM_T_82547:
   2024 	case WM_T_82547_2:
   2025 		/* First we have to find the I/O BAR. */
   2026 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2027 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2028 			if (memtype == PCI_MAPREG_TYPE_IO)
   2029 				break;
   2030 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2031 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2032 				i += 4;	/* skip high bits, too */
   2033 		}
   2034 		if (i < PCI_MAPREG_END) {
   2035 			/*
    2036 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2037 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2038 			 * That's no problem, because the newer chips don't
    2039 			 * have this bug.
    2040 			 *
    2041 			 * The i8254x apparently doesn't respond when the
    2042 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2043 			 * been configured.
   2044 			 */
   2045 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2046 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2047 				aprint_error_dev(sc->sc_dev,
   2048 				    "WARNING: I/O BAR at zero.\n");
   2049 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2050 					0, &sc->sc_iot, &sc->sc_ioh,
   2051 					NULL, &sc->sc_ios) == 0) {
   2052 				sc->sc_flags |= WM_F_IOH_VALID;
   2053 			} else
   2054 				aprint_error_dev(sc->sc_dev,
   2055 				    "WARNING: unable to map I/O space\n");
   2056 		}
   2057 		break;
   2058 	default:
   2059 		break;
   2060 	}
   2061 
   2062 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2063 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2064 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2065 	if (sc->sc_type < WM_T_82542_2_1)
   2066 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2067 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2068 
   2069 	/* Power up chip */
   2070 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2071 	    && error != EOPNOTSUPP) {
   2072 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2073 		return;
   2074 	}
   2075 
   2076 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2077 	/*
    2078 	 *  Don't use MSI-X if we can use only one queue, to save
    2079 	 * interrupt resources.
   2080 	 */
   2081 	if (sc->sc_nqueues > 1) {
   2082 		max_type = PCI_INTR_TYPE_MSIX;
   2083 		/*
    2084 		 *  The 82583 has an MSI-X capability in the PCI configuration
    2085 		 * space but doesn't support it. At least the documentation
    2086 		 * doesn't say anything about MSI-X.
   2087 		 */
   2088 		counts[PCI_INTR_TYPE_MSIX]
   2089 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2090 	} else {
   2091 		max_type = PCI_INTR_TYPE_MSI;
   2092 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2093 	}
   2094 
   2095 	/* Allocation settings */
   2096 	counts[PCI_INTR_TYPE_MSI] = 1;
   2097 	counts[PCI_INTR_TYPE_INTX] = 1;
   2098 	/* overridden by disable flags */
   2099 	if (wm_disable_msi != 0) {
   2100 		counts[PCI_INTR_TYPE_MSI] = 0;
   2101 		if (wm_disable_msix != 0) {
   2102 			max_type = PCI_INTR_TYPE_INTX;
   2103 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2104 		}
   2105 	} else if (wm_disable_msix != 0) {
   2106 		max_type = PCI_INTR_TYPE_MSI;
   2107 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2108 	}
   2109 
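         	/*
         	 * Allocate the interrupt, falling back one type at a time on
         	 * failure: MSI-X first (when more than one queue is usable),
         	 * then MSI, then INTx.
         	 */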
   2110 alloc_retry:
   2111 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2112 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2113 		return;
   2114 	}
   2115 
   2116 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2117 		error = wm_setup_msix(sc);
   2118 		if (error) {
   2119 			pci_intr_release(pc, sc->sc_intrs,
   2120 			    counts[PCI_INTR_TYPE_MSIX]);
   2121 
   2122 			/* Setup for MSI: Disable MSI-X */
   2123 			max_type = PCI_INTR_TYPE_MSI;
   2124 			counts[PCI_INTR_TYPE_MSI] = 1;
   2125 			counts[PCI_INTR_TYPE_INTX] = 1;
   2126 			goto alloc_retry;
   2127 		}
   2128 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2129 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2130 		error = wm_setup_legacy(sc);
   2131 		if (error) {
   2132 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2133 			    counts[PCI_INTR_TYPE_MSI]);
   2134 
   2135 			/* The next try is for INTx: Disable MSI */
   2136 			max_type = PCI_INTR_TYPE_INTX;
   2137 			counts[PCI_INTR_TYPE_INTX] = 1;
   2138 			goto alloc_retry;
   2139 		}
   2140 	} else {
   2141 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2142 		error = wm_setup_legacy(sc);
   2143 		if (error) {
   2144 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2145 			    counts[PCI_INTR_TYPE_INTX]);
   2146 			return;
   2147 		}
   2148 	}
   2149 
   2150 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2151 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2152 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2153 	    WM_WORKQUEUE_FLAGS);
   2154 	if (error) {
   2155 		aprint_error_dev(sc->sc_dev,
   2156 		    "unable to create workqueue\n");
   2157 		goto out;
   2158 	}
   2159 
   2160 	/*
   2161 	 * Check the function ID (unit number of the chip).
   2162 	 */
   2163 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2164 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2165 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2166 	    || (sc->sc_type == WM_T_82580)
   2167 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2168 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2169 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2170 	else
   2171 		sc->sc_funcid = 0;
   2172 
   2173 	/*
   2174 	 * Determine a few things about the bus we're connected to.
   2175 	 */
   2176 	if (sc->sc_type < WM_T_82543) {
   2177 		/* We don't really know the bus characteristics here. */
   2178 		sc->sc_bus_speed = 33;
   2179 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2180 		/*
   2181 		 * CSA (Communication Streaming Architecture) is about as fast
    2182 		 * as a 32-bit 66MHz PCI bus.
   2183 		 */
   2184 		sc->sc_flags |= WM_F_CSA;
   2185 		sc->sc_bus_speed = 66;
   2186 		aprint_verbose_dev(sc->sc_dev,
   2187 		    "Communication Streaming Architecture\n");
   2188 		if (sc->sc_type == WM_T_82547) {
   2189 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2190 			callout_setfunc(&sc->sc_txfifo_ch,
   2191 			    wm_82547_txfifo_stall, sc);
   2192 			aprint_verbose_dev(sc->sc_dev,
   2193 			    "using 82547 Tx FIFO stall work-around\n");
   2194 		}
   2195 	} else if (sc->sc_type >= WM_T_82571) {
   2196 		sc->sc_flags |= WM_F_PCIE;
   2197 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2198 		    && (sc->sc_type != WM_T_ICH10)
   2199 		    && (sc->sc_type != WM_T_PCH)
   2200 		    && (sc->sc_type != WM_T_PCH2)
   2201 		    && (sc->sc_type != WM_T_PCH_LPT)
   2202 		    && (sc->sc_type != WM_T_PCH_SPT)
   2203 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2204 			/* ICH* and PCH* have no PCIe capability registers */
   2205 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2206 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2207 				NULL) == 0)
   2208 				aprint_error_dev(sc->sc_dev,
   2209 				    "unable to find PCIe capability\n");
   2210 		}
   2211 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2212 	} else {
   2213 		reg = CSR_READ(sc, WMREG_STATUS);
   2214 		if (reg & STATUS_BUS64)
   2215 			sc->sc_flags |= WM_F_BUS64;
   2216 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2217 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2218 
   2219 			sc->sc_flags |= WM_F_PCIX;
   2220 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2221 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2222 				aprint_error_dev(sc->sc_dev,
   2223 				    "unable to find PCIX capability\n");
   2224 			else if (sc->sc_type != WM_T_82545_3 &&
   2225 				 sc->sc_type != WM_T_82546_3) {
   2226 				/*
   2227 				 * Work around a problem caused by the BIOS
   2228 				 * setting the max memory read byte count
   2229 				 * incorrectly.
   2230 				 */
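         				/*
         				 * Example: if the BIOS programmed a
         				 * 4096-byte MMRBC (bytecnt = 3) but
         				 * the device caps it at 2048 bytes
         				 * (maxb = 2), the code below rewrites
         				 * MMRBC for 2048 (512 << n bytes in
         				 * both cases).
         				 */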
   2231 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2232 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2233 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2234 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2235 
   2236 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2237 				    PCIX_CMD_BYTECNT_SHIFT;
   2238 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2239 				    PCIX_STATUS_MAXB_SHIFT;
   2240 				if (bytecnt > maxb) {
   2241 					aprint_verbose_dev(sc->sc_dev,
   2242 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2243 					    512 << bytecnt, 512 << maxb);
   2244 					pcix_cmd = (pcix_cmd &
   2245 					    ~PCIX_CMD_BYTECNT_MASK) |
   2246 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2247 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2248 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2249 					    pcix_cmd);
   2250 				}
   2251 			}
   2252 		}
   2253 		/*
   2254 		 * The quad port adapter is special; it has a PCIX-PCIX
   2255 		 * bridge on the board, and can run the secondary bus at
   2256 		 * a higher speed.
   2257 		 */
   2258 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2259 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2260 								      : 66;
   2261 		} else if (sc->sc_flags & WM_F_PCIX) {
   2262 			switch (reg & STATUS_PCIXSPD_MASK) {
   2263 			case STATUS_PCIXSPD_50_66:
   2264 				sc->sc_bus_speed = 66;
   2265 				break;
   2266 			case STATUS_PCIXSPD_66_100:
   2267 				sc->sc_bus_speed = 100;
   2268 				break;
   2269 			case STATUS_PCIXSPD_100_133:
   2270 				sc->sc_bus_speed = 133;
   2271 				break;
   2272 			default:
   2273 				aprint_error_dev(sc->sc_dev,
   2274 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2275 				    reg & STATUS_PCIXSPD_MASK);
   2276 				sc->sc_bus_speed = 66;
   2277 				break;
   2278 			}
   2279 		} else
   2280 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2281 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2282 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2283 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2284 	}
   2285 
    2286 	/* Clear interesting stat counters */
   2287 	CSR_READ(sc, WMREG_COLC);
   2288 	CSR_READ(sc, WMREG_RXERRC);
   2289 
   2290 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2291 	    || (sc->sc_type >= WM_T_ICH8))
   2292 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2293 	if (sc->sc_type >= WM_T_ICH8)
   2294 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2295 
   2296 	/* Set PHY, NVM mutex related stuff */
   2297 	switch (sc->sc_type) {
   2298 	case WM_T_82542_2_0:
   2299 	case WM_T_82542_2_1:
   2300 	case WM_T_82543:
   2301 	case WM_T_82544:
   2302 		/* Microwire */
   2303 		sc->nvm.read = wm_nvm_read_uwire;
   2304 		sc->sc_nvm_wordsize = 64;
   2305 		sc->sc_nvm_addrbits = 6;
   2306 		break;
   2307 	case WM_T_82540:
   2308 	case WM_T_82545:
   2309 	case WM_T_82545_3:
   2310 	case WM_T_82546:
   2311 	case WM_T_82546_3:
   2312 		/* Microwire */
   2313 		sc->nvm.read = wm_nvm_read_uwire;
   2314 		reg = CSR_READ(sc, WMREG_EECD);
   2315 		if (reg & EECD_EE_SIZE) {
   2316 			sc->sc_nvm_wordsize = 256;
   2317 			sc->sc_nvm_addrbits = 8;
   2318 		} else {
   2319 			sc->sc_nvm_wordsize = 64;
   2320 			sc->sc_nvm_addrbits = 6;
   2321 		}
   2322 		sc->sc_flags |= WM_F_LOCK_EECD;
   2323 		sc->nvm.acquire = wm_get_eecd;
   2324 		sc->nvm.release = wm_put_eecd;
   2325 		break;
   2326 	case WM_T_82541:
   2327 	case WM_T_82541_2:
   2328 	case WM_T_82547:
   2329 	case WM_T_82547_2:
   2330 		reg = CSR_READ(sc, WMREG_EECD);
   2331 		/*
    2332 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2333 		 * the 8254[17], so set the flags and functions before calling it.
   2334 		 */
   2335 		sc->sc_flags |= WM_F_LOCK_EECD;
   2336 		sc->nvm.acquire = wm_get_eecd;
   2337 		sc->nvm.release = wm_put_eecd;
   2338 		if (reg & EECD_EE_TYPE) {
   2339 			/* SPI */
   2340 			sc->nvm.read = wm_nvm_read_spi;
   2341 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2342 			wm_nvm_set_addrbits_size_eecd(sc);
   2343 		} else {
   2344 			/* Microwire */
   2345 			sc->nvm.read = wm_nvm_read_uwire;
   2346 			if ((reg & EECD_EE_ABITS) != 0) {
   2347 				sc->sc_nvm_wordsize = 256;
   2348 				sc->sc_nvm_addrbits = 8;
   2349 			} else {
   2350 				sc->sc_nvm_wordsize = 64;
   2351 				sc->sc_nvm_addrbits = 6;
   2352 			}
   2353 		}
   2354 		break;
   2355 	case WM_T_82571:
   2356 	case WM_T_82572:
   2357 		/* SPI */
   2358 		sc->nvm.read = wm_nvm_read_eerd;
    2359 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2360 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2361 		wm_nvm_set_addrbits_size_eecd(sc);
   2362 		sc->phy.acquire = wm_get_swsm_semaphore;
   2363 		sc->phy.release = wm_put_swsm_semaphore;
   2364 		sc->nvm.acquire = wm_get_nvm_82571;
   2365 		sc->nvm.release = wm_put_nvm_82571;
   2366 		break;
   2367 	case WM_T_82573:
   2368 	case WM_T_82574:
   2369 	case WM_T_82583:
   2370 		sc->nvm.read = wm_nvm_read_eerd;
    2371 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2372 		if (sc->sc_type == WM_T_82573) {
   2373 			sc->phy.acquire = wm_get_swsm_semaphore;
   2374 			sc->phy.release = wm_put_swsm_semaphore;
   2375 			sc->nvm.acquire = wm_get_nvm_82571;
   2376 			sc->nvm.release = wm_put_nvm_82571;
   2377 		} else {
   2378 			/* Both PHY and NVM use the same semaphore. */
   2379 			sc->phy.acquire = sc->nvm.acquire
   2380 			    = wm_get_swfwhw_semaphore;
   2381 			sc->phy.release = sc->nvm.release
   2382 			    = wm_put_swfwhw_semaphore;
   2383 		}
   2384 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2385 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2386 			sc->sc_nvm_wordsize = 2048;
   2387 		} else {
   2388 			/* SPI */
   2389 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2390 			wm_nvm_set_addrbits_size_eecd(sc);
   2391 		}
   2392 		break;
   2393 	case WM_T_82575:
   2394 	case WM_T_82576:
   2395 	case WM_T_82580:
   2396 	case WM_T_I350:
   2397 	case WM_T_I354:
   2398 	case WM_T_80003:
   2399 		/* SPI */
   2400 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2401 		wm_nvm_set_addrbits_size_eecd(sc);
   2402 		if ((sc->sc_type == WM_T_80003)
   2403 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2404 			sc->nvm.read = wm_nvm_read_eerd;
   2405 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2406 		} else {
   2407 			sc->nvm.read = wm_nvm_read_spi;
   2408 			sc->sc_flags |= WM_F_LOCK_EECD;
   2409 		}
   2410 		sc->phy.acquire = wm_get_phy_82575;
   2411 		sc->phy.release = wm_put_phy_82575;
   2412 		sc->nvm.acquire = wm_get_nvm_80003;
   2413 		sc->nvm.release = wm_put_nvm_80003;
   2414 		break;
   2415 	case WM_T_ICH8:
   2416 	case WM_T_ICH9:
   2417 	case WM_T_ICH10:
   2418 	case WM_T_PCH:
   2419 	case WM_T_PCH2:
   2420 	case WM_T_PCH_LPT:
   2421 		sc->nvm.read = wm_nvm_read_ich8;
   2422 		/* FLASH */
   2423 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2424 		sc->sc_nvm_wordsize = 2048;
   2425 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2426 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2427 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2428 			aprint_error_dev(sc->sc_dev,
   2429 			    "can't map FLASH registers\n");
   2430 			goto out;
   2431 		}
   2432 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2433 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2434 		    ICH_FLASH_SECTOR_SIZE;
   2435 		sc->sc_ich8_flash_bank_size =
   2436 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2437 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2438 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2439 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
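         		/*
         		 * Worked example, assuming the usual 4KB flash sector:
         		 * GFPREG = 0x00100001 places the region at sector 1
         		 * (base 4KB) with a limit of sector 0x10, i.e. 0x10
         		 * sectors = 64KB in all; halved for the two banks and
         		 * converted to 16-bit words, that is 65536 / 4 = 16384
         		 * words per bank.
         		 */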
   2440 		sc->sc_flashreg_offset = 0;
   2441 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2442 		sc->phy.release = wm_put_swflag_ich8lan;
   2443 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2444 		sc->nvm.release = wm_put_nvm_ich8lan;
   2445 		break;
   2446 	case WM_T_PCH_SPT:
   2447 	case WM_T_PCH_CNP:
   2448 		sc->nvm.read = wm_nvm_read_spt;
   2449 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2450 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2451 		sc->sc_flasht = sc->sc_st;
   2452 		sc->sc_flashh = sc->sc_sh;
   2453 		sc->sc_ich8_flash_base = 0;
   2454 		sc->sc_nvm_wordsize =
   2455 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2456 		    * NVM_SIZE_MULTIPLIER;
    2457 		/* That is the size in bytes; we want words */
   2458 		sc->sc_nvm_wordsize /= 2;
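         		/*
         		 * E.g. a strap field of 7 with the (presumed) 4096-byte
         		 * multiplier gives (7 + 1) * 4096 = 32KB of flash, i.e.
         		 * 16384 words.
         		 */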
   2459 		/* Assume 2 banks */
   2460 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2461 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2462 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2463 		sc->phy.release = wm_put_swflag_ich8lan;
   2464 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2465 		sc->nvm.release = wm_put_nvm_ich8lan;
   2466 		break;
   2467 	case WM_T_I210:
   2468 	case WM_T_I211:
    2469 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2470 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2471 		if (wm_nvm_flash_presence_i210(sc)) {
   2472 			sc->nvm.read = wm_nvm_read_eerd;
   2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2474 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2475 			wm_nvm_set_addrbits_size_eecd(sc);
   2476 		} else {
   2477 			sc->nvm.read = wm_nvm_read_invm;
   2478 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2479 			sc->sc_nvm_wordsize = INVM_SIZE;
   2480 		}
   2481 		sc->phy.acquire = wm_get_phy_82575;
   2482 		sc->phy.release = wm_put_phy_82575;
   2483 		sc->nvm.acquire = wm_get_nvm_80003;
   2484 		sc->nvm.release = wm_put_nvm_80003;
   2485 		break;
   2486 	default:
   2487 		break;
   2488 	}
   2489 
   2490 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2491 	switch (sc->sc_type) {
   2492 	case WM_T_82571:
   2493 	case WM_T_82572:
   2494 		reg = CSR_READ(sc, WMREG_SWSM2);
   2495 		if ((reg & SWSM2_LOCK) == 0) {
   2496 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2497 			force_clear_smbi = true;
   2498 		} else
   2499 			force_clear_smbi = false;
   2500 		break;
   2501 	case WM_T_82573:
   2502 	case WM_T_82574:
   2503 	case WM_T_82583:
   2504 		force_clear_smbi = true;
   2505 		break;
   2506 	default:
   2507 		force_clear_smbi = false;
   2508 		break;
   2509 	}
   2510 	if (force_clear_smbi) {
   2511 		reg = CSR_READ(sc, WMREG_SWSM);
   2512 		if ((reg & SWSM_SMBI) != 0)
   2513 			aprint_error_dev(sc->sc_dev,
   2514 			    "Please update the Bootagent\n");
   2515 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2516 	}
   2517 
   2518 	/*
    2519 	 * Defer printing the EEPROM type until after verifying the checksum.
   2520 	 * This allows the EEPROM type to be printed correctly in the case
   2521 	 * that no EEPROM is attached.
   2522 	 */
   2523 	/*
   2524 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2525 	 * this for later, so we can fail future reads from the EEPROM.
   2526 	 */
   2527 	if (wm_nvm_validate_checksum(sc)) {
   2528 		/*
    2529 		 * Check a second time, because some PCIe parts fail the
    2530 		 * first check due to the link being in a sleep state.
   2531 		 */
   2532 		if (wm_nvm_validate_checksum(sc))
   2533 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2534 	}
   2535 
   2536 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2537 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2538 	else {
   2539 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2540 		    sc->sc_nvm_wordsize);
   2541 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2542 			aprint_verbose("iNVM");
   2543 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2544 			aprint_verbose("FLASH(HW)");
   2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2546 			aprint_verbose("FLASH");
   2547 		else {
   2548 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2549 				eetype = "SPI";
   2550 			else
   2551 				eetype = "MicroWire";
   2552 			aprint_verbose("(%d address bits) %s EEPROM",
   2553 			    sc->sc_nvm_addrbits, eetype);
   2554 		}
   2555 	}
   2556 	wm_nvm_version(sc);
   2557 	aprint_verbose("\n");
   2558 
   2559 	/*
    2560 	 * XXX This is the first call of wm_gmii_setup_phytype(). The result
    2561 	 * might be incorrect.
   2562 	 */
   2563 	wm_gmii_setup_phytype(sc, 0, 0);
   2564 
   2565 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2566 	switch (sc->sc_type) {
   2567 	case WM_T_ICH8:
   2568 	case WM_T_ICH9:
   2569 	case WM_T_ICH10:
   2570 	case WM_T_PCH:
   2571 	case WM_T_PCH2:
   2572 	case WM_T_PCH_LPT:
   2573 	case WM_T_PCH_SPT:
   2574 	case WM_T_PCH_CNP:
   2575 		apme_mask = WUC_APME;
   2576 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2577 		if ((eeprom_data & apme_mask) != 0)
   2578 			sc->sc_flags |= WM_F_WOL;
   2579 		break;
   2580 	default:
   2581 		break;
   2582 	}
   2583 
   2584 	/* Reset the chip to a known state. */
   2585 	wm_reset(sc);
   2586 
   2587 	/*
   2588 	 * Check for I21[01] PLL workaround.
   2589 	 *
   2590 	 * Three cases:
   2591 	 * a) Chip is I211.
   2592 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2593 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2594 	 */
   2595 	if (sc->sc_type == WM_T_I211)
   2596 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2597 	if (sc->sc_type == WM_T_I210) {
   2598 		if (!wm_nvm_flash_presence_i210(sc))
   2599 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2600 		else if ((sc->sc_nvm_ver_major < 3)
   2601 		    || ((sc->sc_nvm_ver_major == 3)
   2602 			&& (sc->sc_nvm_ver_minor < 25))) {
   2603 			aprint_verbose_dev(sc->sc_dev,
   2604 			    "ROM image version %d.%d is older than 3.25\n",
   2605 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2606 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2607 		}
   2608 	}
   2609 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2610 		wm_pll_workaround_i210(sc);
   2611 
   2612 	wm_get_wakeup(sc);
   2613 
   2614 	/* Non-AMT based hardware can now take control from firmware */
   2615 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2616 		wm_get_hw_control(sc);
   2617 
   2618 	/*
    2619 	 * Read the Ethernet address from the EEPROM, if it wasn't found
    2620 	 * first in the device properties.
   2621 	 */
   2622 	ea = prop_dictionary_get(dict, "mac-address");
   2623 	if (ea != NULL) {
   2624 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2625 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2626 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2627 	} else {
   2628 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2629 			aprint_error_dev(sc->sc_dev,
   2630 			    "unable to read Ethernet address\n");
   2631 			goto out;
   2632 		}
   2633 	}
   2634 
   2635 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2636 	    ether_sprintf(enaddr));
   2637 
   2638 	/*
   2639 	 * Read the config info from the EEPROM, and set up various
   2640 	 * bits in the control registers based on their contents.
   2641 	 */
   2642 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2643 	if (pn != NULL) {
   2644 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2645 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2646 	} else {
   2647 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2648 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2649 			goto out;
   2650 		}
   2651 	}
   2652 
   2653 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2654 	if (pn != NULL) {
   2655 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2656 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2657 	} else {
   2658 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2659 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2660 			goto out;
   2661 		}
   2662 	}
   2663 
   2664 	/* check for WM_F_WOL */
   2665 	switch (sc->sc_type) {
   2666 	case WM_T_82542_2_0:
   2667 	case WM_T_82542_2_1:
   2668 	case WM_T_82543:
   2669 		/* dummy? */
   2670 		eeprom_data = 0;
   2671 		apme_mask = NVM_CFG3_APME;
   2672 		break;
   2673 	case WM_T_82544:
   2674 		apme_mask = NVM_CFG2_82544_APM_EN;
   2675 		eeprom_data = cfg2;
   2676 		break;
   2677 	case WM_T_82546:
   2678 	case WM_T_82546_3:
   2679 	case WM_T_82571:
   2680 	case WM_T_82572:
   2681 	case WM_T_82573:
   2682 	case WM_T_82574:
   2683 	case WM_T_82583:
   2684 	case WM_T_80003:
   2685 	case WM_T_82575:
   2686 	case WM_T_82576:
   2687 		apme_mask = NVM_CFG3_APME;
   2688 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2689 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2690 		break;
   2691 	case WM_T_82580:
   2692 	case WM_T_I350:
   2693 	case WM_T_I354:
   2694 	case WM_T_I210:
   2695 	case WM_T_I211:
   2696 		apme_mask = NVM_CFG3_APME;
   2697 		wm_nvm_read(sc,
   2698 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2699 		    1, &eeprom_data);
   2700 		break;
   2701 	case WM_T_ICH8:
   2702 	case WM_T_ICH9:
   2703 	case WM_T_ICH10:
   2704 	case WM_T_PCH:
   2705 	case WM_T_PCH2:
   2706 	case WM_T_PCH_LPT:
   2707 	case WM_T_PCH_SPT:
   2708 	case WM_T_PCH_CNP:
    2709 		/* Already checked before wm_reset() */
   2710 		apme_mask = eeprom_data = 0;
   2711 		break;
   2712 	default: /* XXX 82540 */
   2713 		apme_mask = NVM_CFG3_APME;
   2714 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2715 		break;
   2716 	}
    2717 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2718 	if ((eeprom_data & apme_mask) != 0)
   2719 		sc->sc_flags |= WM_F_WOL;
   2720 
   2721 	/*
    2722 	 * We have the EEPROM settings; now apply the special cases
    2723 	 * where the EEPROM may be wrong or the board doesn't support
    2724 	 * wake-on-LAN on a particular port.
   2725 	 */
   2726 	switch (sc->sc_pcidevid) {
   2727 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2728 		sc->sc_flags &= ~WM_F_WOL;
   2729 		break;
   2730 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2731 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2732 		/* Wake events only supported on port A for dual fiber
   2733 		 * regardless of eeprom setting */
   2734 		if (sc->sc_funcid == 1)
   2735 			sc->sc_flags &= ~WM_F_WOL;
   2736 		break;
   2737 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2738 		/* If quad port adapter, disable WoL on all but port A */
   2739 		if (sc->sc_funcid != 0)
   2740 			sc->sc_flags &= ~WM_F_WOL;
   2741 		break;
   2742 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2743 		/* Wake events only supported on port A for dual fiber
   2744 		 * regardless of eeprom setting */
   2745 		if (sc->sc_funcid == 1)
   2746 			sc->sc_flags &= ~WM_F_WOL;
   2747 		break;
   2748 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2749 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2750 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2751 		/* If quad port adapter, disable WoL on all but port A */
   2752 		if (sc->sc_funcid != 0)
   2753 			sc->sc_flags &= ~WM_F_WOL;
   2754 		break;
   2755 	}
   2756 
   2757 	if (sc->sc_type >= WM_T_82575) {
   2758 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2759 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2760 			    nvmword);
   2761 			if ((sc->sc_type == WM_T_82575) ||
   2762 			    (sc->sc_type == WM_T_82576)) {
   2763 				/* Check NVM for autonegotiation */
   2764 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2765 				    != 0)
   2766 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2767 			}
   2768 			if ((sc->sc_type == WM_T_82575) ||
   2769 			    (sc->sc_type == WM_T_I350)) {
   2770 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2771 					sc->sc_flags |= WM_F_MAS;
   2772 			}
   2773 		}
   2774 	}
   2775 
   2776 	/*
    2777 	 * XXX need special handling for some multiple-port cards
    2778 	 * to disable a particular port.
   2779 	 */
   2780 
   2781 	if (sc->sc_type >= WM_T_82544) {
   2782 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2783 		if (pn != NULL) {
   2784 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2785 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2786 		} else {
   2787 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2788 				aprint_error_dev(sc->sc_dev,
   2789 				    "unable to read SWDPIN\n");
   2790 				goto out;
   2791 			}
   2792 		}
   2793 	}
   2794 
   2795 	if (cfg1 & NVM_CFG1_ILOS)
   2796 		sc->sc_ctrl |= CTRL_ILOS;
   2797 
   2798 	/*
   2799 	 * XXX
    2800 	 * This code isn't correct because pins 2 and 3 are located
    2801 	 * at different positions on newer chips. Check all datasheets.
    2802 	 *
    2803 	 * Until this problem is resolved, only handle chips up to 82580.
   2804 	 */
   2805 	if (sc->sc_type <= WM_T_82580) {
   2806 		if (sc->sc_type >= WM_T_82544) {
   2807 			sc->sc_ctrl |=
   2808 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2809 			    CTRL_SWDPIO_SHIFT;
   2810 			sc->sc_ctrl |=
   2811 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2812 			    CTRL_SWDPINS_SHIFT;
   2813 		} else {
   2814 			sc->sc_ctrl |=
   2815 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2816 			    CTRL_SWDPIO_SHIFT;
   2817 		}
   2818 	}
   2819 
   2820 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2821 		wm_nvm_read(sc,
   2822 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2823 		    1, &nvmword);
   2824 		if (nvmword & NVM_CFG3_ILOS)
   2825 			sc->sc_ctrl |= CTRL_ILOS;
   2826 	}
   2827 
   2828 #if 0
   2829 	if (sc->sc_type >= WM_T_82544) {
   2830 		if (cfg1 & NVM_CFG1_IPS0)
   2831 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2832 		if (cfg1 & NVM_CFG1_IPS1)
   2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2834 		sc->sc_ctrl_ext |=
   2835 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2836 		    CTRL_EXT_SWDPIO_SHIFT;
   2837 		sc->sc_ctrl_ext |=
   2838 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2839 		    CTRL_EXT_SWDPINS_SHIFT;
   2840 	} else {
   2841 		sc->sc_ctrl_ext |=
   2842 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2843 		    CTRL_EXT_SWDPIO_SHIFT;
   2844 	}
   2845 #endif
   2846 
   2847 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2848 #if 0
   2849 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2850 #endif
   2851 
   2852 	if (sc->sc_type == WM_T_PCH) {
   2853 		uint16_t val;
   2854 
   2855 		/* Save the NVM K1 bit setting */
   2856 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2857 
   2858 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2859 			sc->sc_nvm_k1_enabled = 1;
   2860 		else
   2861 			sc->sc_nvm_k1_enabled = 0;
   2862 	}
   2863 
   2864 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2865 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2866 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2867 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2868 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2869 	    || sc->sc_type == WM_T_82573
   2870 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2871 		/* Copper only */
   2872 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2873 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2874 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2875 	    || (sc->sc_type == WM_T_I211)) {
   2876 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2877 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2878 		switch (link_mode) {
   2879 		case CTRL_EXT_LINK_MODE_1000KX:
   2880 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2881 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2882 			break;
   2883 		case CTRL_EXT_LINK_MODE_SGMII:
   2884 			if (wm_sgmii_uses_mdio(sc)) {
   2885 				aprint_normal_dev(sc->sc_dev,
   2886 				    "SGMII(MDIO)\n");
   2887 				sc->sc_flags |= WM_F_SGMII;
   2888 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2889 				break;
   2890 			}
   2891 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2892 			/*FALLTHROUGH*/
   2893 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2894 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2895 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2896 				if (link_mode
   2897 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2898 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2899 					sc->sc_flags |= WM_F_SGMII;
   2900 					aprint_verbose_dev(sc->sc_dev,
   2901 					    "SGMII\n");
   2902 				} else {
   2903 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2904 					aprint_verbose_dev(sc->sc_dev,
   2905 					    "SERDES\n");
   2906 				}
   2907 				break;
   2908 			}
   2909 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2910 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2911 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2912 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2913 				sc->sc_flags |= WM_F_SGMII;
   2914 			}
   2915 			/* Do not change link mode for 100BaseFX */
   2916 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2917 				break;
   2918 
   2919 			/* Change current link mode setting */
   2920 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2921 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2922 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2923 			else
   2924 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2925 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2926 			break;
   2927 		case CTRL_EXT_LINK_MODE_GMII:
   2928 		default:
   2929 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2930 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2931 			break;
   2932 		}
   2933 
    2934 		/* Enable the I2C interface only when running in SGMII mode. */
   2935 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2936 			reg |= CTRL_EXT_I2C_ENA;
   2937 		else
   2938 			reg &= ~CTRL_EXT_I2C_ENA;
   2939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2940 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2941 			if (!wm_sgmii_uses_mdio(sc))
   2942 				wm_gmii_setup_phytype(sc, 0, 0);
   2943 			wm_reset_mdicnfg_82580(sc);
   2944 		}
   2945 	} else if (sc->sc_type < WM_T_82543 ||
   2946 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2947 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2948 			aprint_error_dev(sc->sc_dev,
   2949 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2950 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2951 		}
   2952 	} else {
   2953 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2954 			aprint_error_dev(sc->sc_dev,
   2955 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2956 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2957 		}
   2958 	}
   2959 
   2960 	if (sc->sc_type >= WM_T_PCH2)
   2961 		sc->sc_flags |= WM_F_EEE;
   2962 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2963 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2964 		/* XXX: Need special handling for I354. (not yet) */
   2965 		if (sc->sc_type != WM_T_I354)
   2966 			sc->sc_flags |= WM_F_EEE;
   2967 	}
   2968 
   2969 	/*
   2970 	 * The I350 has a bug where it always strips the CRC whether
   2971 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2972 	 */
   2973 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2974 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2975 		sc->sc_flags |= WM_F_CRC_STRIP;
   2976 
   2977 	/* Set device properties (macflags) */
   2978 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2979 
   2980 	if (sc->sc_flags != 0) {
   2981 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2982 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2983 	}
   2984 
   2985 #ifdef WM_MPSAFE
   2986 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2987 #else
   2988 	sc->sc_core_lock = NULL;
   2989 #endif
   2990 
   2991 	/* Initialize the media structures accordingly. */
   2992 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2993 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2994 	else
   2995 		wm_tbi_mediainit(sc); /* All others */
   2996 
   2997 	ifp = &sc->sc_ethercom.ec_if;
   2998 	xname = device_xname(sc->sc_dev);
   2999 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3000 	ifp->if_softc = sc;
   3001 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3002 #ifdef WM_MPSAFE
   3003 	ifp->if_extflags = IFEF_MPSAFE;
   3004 #endif
   3005 	ifp->if_ioctl = wm_ioctl;
   3006 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3007 		ifp->if_start = wm_nq_start;
   3008 		/*
    3009 		 * When the number of CPUs is one and the controller can use
    3010 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3011 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    3012 		 * other for link status changes.
    3013 		 * In this situation, wm_nq_transmit() is disadvantageous
    3014 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3015 		 */
   3016 		if (wm_is_using_multiqueue(sc))
   3017 			ifp->if_transmit = wm_nq_transmit;
   3018 	} else {
   3019 		ifp->if_start = wm_start;
   3020 		/*
    3021 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   3022 		 */
   3023 		if (wm_is_using_multiqueue(sc))
   3024 			ifp->if_transmit = wm_transmit;
   3025 	}
    3026 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3027 	ifp->if_init = wm_init;
   3028 	ifp->if_stop = wm_stop;
   3029 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3030 	IFQ_SET_READY(&ifp->if_snd);
   3031 
   3032 	/* Check for jumbo frame */
   3033 	switch (sc->sc_type) {
   3034 	case WM_T_82573:
   3035 		/* XXX limited to 9234 if ASPM is disabled */
   3036 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3037 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3038 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3039 		break;
   3040 	case WM_T_82571:
   3041 	case WM_T_82572:
   3042 	case WM_T_82574:
   3043 	case WM_T_82583:
   3044 	case WM_T_82575:
   3045 	case WM_T_82576:
   3046 	case WM_T_82580:
   3047 	case WM_T_I350:
   3048 	case WM_T_I354:
   3049 	case WM_T_I210:
   3050 	case WM_T_I211:
   3051 	case WM_T_80003:
   3052 	case WM_T_ICH9:
   3053 	case WM_T_ICH10:
   3054 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3055 	case WM_T_PCH_LPT:
   3056 	case WM_T_PCH_SPT:
   3057 	case WM_T_PCH_CNP:
   3058 		/* XXX limited to 9234 */
   3059 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3060 		break;
   3061 	case WM_T_PCH:
   3062 		/* XXX limited to 4096 */
   3063 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3064 		break;
   3065 	case WM_T_82542_2_0:
   3066 	case WM_T_82542_2_1:
   3067 	case WM_T_ICH8:
   3068 		/* No support for jumbo frame */
   3069 		break;
   3070 	default:
   3071 		/* ETHER_MAX_LEN_JUMBO */
   3072 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3073 		break;
   3074 	}
   3075 
    3076 	/* If we're an i82543 or greater, we can support VLANs. */
   3077 	if (sc->sc_type >= WM_T_82543) {
   3078 		sc->sc_ethercom.ec_capabilities |=
   3079 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3080 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3081 	}
   3082 
   3083 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3084 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3085 
   3086 	/*
    3087 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3088 	 * on i82543 and later.
   3089 	 */
   3090 	if (sc->sc_type >= WM_T_82543) {
   3091 		ifp->if_capabilities |=
   3092 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3093 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3094 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3095 		    IFCAP_CSUM_TCPv6_Tx |
   3096 		    IFCAP_CSUM_UDPv6_Tx;
   3097 	}
   3098 
   3099 	/*
   3100 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3101 	 *
   3102 	 *	82541GI (8086:1076) ... no
   3103 	 *	82572EI (8086:10b9) ... yes
   3104 	 */
   3105 	if (sc->sc_type >= WM_T_82571) {
   3106 		ifp->if_capabilities |=
   3107 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3108 	}
   3109 
   3110 	/*
    3111 	 * If we're an i82544 or greater (except i82547), we can do
   3112 	 * TCP segmentation offload.
   3113 	 */
   3114 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3115 		ifp->if_capabilities |= IFCAP_TSOv4;
   3116 	}
   3117 
   3118 	if (sc->sc_type >= WM_T_82571) {
   3119 		ifp->if_capabilities |= IFCAP_TSOv6;
   3120 	}
   3121 
   3122 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3123 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3124 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3125 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3126 
   3127 	/* Attach the interface. */
   3128 	if_initialize(ifp);
   3129 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3130 	ether_ifattach(ifp, enaddr);
   3131 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3132 	if_register(ifp);
   3133 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3134 	    RND_FLAG_DEFAULT);
   3135 
   3136 #ifdef WM_EVENT_COUNTERS
   3137 	/* Attach event counters. */
   3138 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3139 	    NULL, xname, "linkintr");
   3140 
   3141 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3142 	    NULL, xname, "tx_xoff");
   3143 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3144 	    NULL, xname, "tx_xon");
   3145 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3146 	    NULL, xname, "rx_xoff");
   3147 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3148 	    NULL, xname, "rx_xon");
   3149 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3150 	    NULL, xname, "rx_macctl");
   3151 #endif /* WM_EVENT_COUNTERS */
   3152 
   3153 	sc->sc_txrx_use_workqueue = false;
   3154 
   3155 	if (wm_phy_need_linkdown_discard(sc))
   3156 		wm_set_linkdown_discard(sc);
   3157 
   3158 	wm_init_sysctls(sc);
   3159 
   3160 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3161 		pmf_class_network_register(self, ifp);
   3162 	else
   3163 		aprint_error_dev(self, "couldn't establish power handler\n");
   3164 
   3165 	sc->sc_flags |= WM_F_ATTACHED;
   3166 out:
   3167 	return;
   3168 }
   3169 
   3170 /* The detach function (ca_detach) */
   3171 static int
   3172 wm_detach(device_t self, int flags __unused)
   3173 {
   3174 	struct wm_softc *sc = device_private(self);
   3175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3176 	int i;
   3177 
   3178 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3179 		return 0;
   3180 
    3181 	/* Stop the interface. Callouts are stopped in wm_stop(). */
   3182 	wm_stop(ifp, 1);
   3183 
   3184 	pmf_device_deregister(self);
   3185 
   3186 	sysctl_teardown(&sc->sc_sysctllog);
   3187 
   3188 #ifdef WM_EVENT_COUNTERS
   3189 	evcnt_detach(&sc->sc_ev_linkintr);
   3190 
   3191 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3192 	evcnt_detach(&sc->sc_ev_tx_xon);
   3193 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3194 	evcnt_detach(&sc->sc_ev_rx_xon);
   3195 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3196 #endif /* WM_EVENT_COUNTERS */
   3197 
   3198 	rnd_detach_source(&sc->rnd_source);
   3199 
   3200 	/* Tell the firmware about the release */
   3201 	WM_CORE_LOCK(sc);
   3202 	wm_release_manageability(sc);
   3203 	wm_release_hw_control(sc);
   3204 	wm_enable_wakeup(sc);
   3205 	WM_CORE_UNLOCK(sc);
   3206 
   3207 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3208 
   3209 	ether_ifdetach(ifp);
   3210 	if_detach(ifp);
   3211 	if_percpuq_destroy(sc->sc_ipq);
   3212 
   3213 	/* Delete all remaining media. */
   3214 	ifmedia_fini(&sc->sc_mii.mii_media);
   3215 
   3216 	/* Unload RX dmamaps and free mbufs */
   3217 	for (i = 0; i < sc->sc_nqueues; i++) {
   3218 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3219 		mutex_enter(rxq->rxq_lock);
   3220 		wm_rxdrain(rxq);
   3221 		mutex_exit(rxq->rxq_lock);
   3222 	}
   3223 	/* Must unlock here */
   3224 
   3225 	/* Disestablish the interrupt handler */
   3226 	for (i = 0; i < sc->sc_nintrs; i++) {
   3227 		if (sc->sc_ihs[i] != NULL) {
   3228 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3229 			sc->sc_ihs[i] = NULL;
   3230 		}
   3231 	}
   3232 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3233 
    3234 	/* wm_stop() ensures the workqueue is stopped. */
   3235 	workqueue_destroy(sc->sc_queue_wq);
   3236 
   3237 	for (i = 0; i < sc->sc_nqueues; i++)
   3238 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3239 
   3240 	wm_free_txrx_queues(sc);
   3241 
   3242 	/* Unmap the registers */
   3243 	if (sc->sc_ss) {
   3244 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3245 		sc->sc_ss = 0;
   3246 	}
   3247 	if (sc->sc_ios) {
   3248 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3249 		sc->sc_ios = 0;
   3250 	}
   3251 	if (sc->sc_flashs) {
   3252 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3253 		sc->sc_flashs = 0;
   3254 	}
   3255 
   3256 	if (sc->sc_core_lock)
   3257 		mutex_obj_free(sc->sc_core_lock);
   3258 	if (sc->sc_ich_phymtx)
   3259 		mutex_obj_free(sc->sc_ich_phymtx);
   3260 	if (sc->sc_ich_nvmmtx)
   3261 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3262 
   3263 	return 0;
   3264 }
   3265 
   3266 static bool
   3267 wm_suspend(device_t self, const pmf_qual_t *qual)
   3268 {
   3269 	struct wm_softc *sc = device_private(self);
   3270 
   3271 	wm_release_manageability(sc);
   3272 	wm_release_hw_control(sc);
   3273 	wm_enable_wakeup(sc);
   3274 
   3275 	return true;
   3276 }
   3277 
   3278 static bool
   3279 wm_resume(device_t self, const pmf_qual_t *qual)
   3280 {
   3281 	struct wm_softc *sc = device_private(self);
   3282 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3283 	pcireg_t reg;
   3284 	char buf[256];
   3285 
   3286 	reg = CSR_READ(sc, WMREG_WUS);
   3287 	if (reg != 0) {
   3288 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3289 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3290 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3291 	}
   3292 
   3293 	if (sc->sc_type >= WM_T_PCH2)
   3294 		wm_resume_workarounds_pchlan(sc);
   3295 	if ((ifp->if_flags & IFF_UP) == 0) {
   3296 		wm_reset(sc);
   3297 		/* Non-AMT based hardware can now take control from firmware */
   3298 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3299 			wm_get_hw_control(sc);
   3300 		wm_init_manageability(sc);
   3301 	} else {
   3302 		/*
   3303 		 * We called pmf_class_network_register(), so if_init() is
   3304 		 * automatically called when IFF_UP. wm_reset(),
   3305 		 * wm_get_hw_control() and wm_init_manageability() are called
   3306 		 * via wm_init().
   3307 		 */
   3308 	}
   3309 
   3310 	return true;
   3311 }
   3312 
   3313 /*
   3314  * wm_watchdog:		[ifnet interface function]
   3315  *
   3316  *	Watchdog timer handler.
   3317  */
   3318 static void
   3319 wm_watchdog(struct ifnet *ifp)
   3320 {
   3321 	int qid;
   3322 	struct wm_softc *sc = ifp->if_softc;
    3323 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576). */
   3324 
   3325 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3326 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3327 
   3328 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3329 	}
   3330 
    3331 	/* If any of the queues hung up, reset the interface. */
   3332 	if (hang_queue != 0) {
   3333 		(void)wm_init(ifp);
   3334 
   3335 		/*
    3336 		 * Some upper layer processing still calls ifp->if_start(),
    3337 		 * e.g. ALTQ or single-CPU systems.
   3338 		 */
   3339 		/* Try to get more packets going. */
   3340 		ifp->if_start(ifp);
   3341 	}
   3342 }
   3343 
   3344 
   3345 static void
   3346 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3347 {
   3348 
   3349 	mutex_enter(txq->txq_lock);
   3350 	if (txq->txq_sending &&
   3351 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3352 		wm_watchdog_txq_locked(ifp, txq, hang);
   3353 
   3354 	mutex_exit(txq->txq_lock);
   3355 }
   3356 
   3357 static void
   3358 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3359     uint16_t *hang)
   3360 {
   3361 	struct wm_softc *sc = ifp->if_softc;
   3362 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3363 
   3364 	KASSERT(mutex_owned(txq->txq_lock));
   3365 
   3366 	/*
   3367 	 * Since we're using delayed interrupts, sweep up
   3368 	 * before we report an error.
   3369 	 */
   3370 	wm_txeof(txq, UINT_MAX);
   3371 
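         	/* Still pending after the sweep: mark this queue as hung. */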
   3372 	if (txq->txq_sending)
   3373 		*hang |= __BIT(wmq->wmq_id);
   3374 
   3375 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3376 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3377 		    device_xname(sc->sc_dev));
   3378 	} else {
   3379 #ifdef WM_DEBUG
   3380 		int i, j;
   3381 		struct wm_txsoft *txs;
   3382 #endif
   3383 		log(LOG_ERR,
   3384 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3385 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3386 		    txq->txq_next);
   3387 		if_statinc(ifp, if_oerrors);
   3388 #ifdef WM_DEBUG
   3389 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3390 		    i = WM_NEXTTXS(txq, i)) {
   3391 			txs = &txq->txq_soft[i];
   3392 			printf("txs %d tx %d -> %d\n",
   3393 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3394 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3395 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3396 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3397 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3398 					printf("\t %#08x%08x\n",
   3399 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3400 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3401 				} else {
   3402 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3403 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3404 					    txq->txq_descs[j].wtx_addr.wa_low);
   3405 					printf("\t %#04x%02x%02x%08x\n",
   3406 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3407 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3408 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3409 					    txq->txq_descs[j].wtx_cmdlen);
   3410 				}
   3411 				if (j == txs->txs_lastdesc)
   3412 					break;
   3413 			}
   3414 		}
   3415 #endif
   3416 	}
   3417 }
   3418 
   3419 /*
   3420  * wm_tick:
   3421  *
   3422  *	One second timer, used to check link status, sweep up
   3423  *	completed transmit jobs, etc.
   3424  */
   3425 static void
   3426 wm_tick(void *arg)
   3427 {
   3428 	struct wm_softc *sc = arg;
   3429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3430 #ifndef WM_MPSAFE
   3431 	int s = splnet();
   3432 #endif
   3433 
   3434 	WM_CORE_LOCK(sc);
   3435 
   3436 	if (sc->sc_core_stopping) {
   3437 		WM_CORE_UNLOCK(sc);
   3438 #ifndef WM_MPSAFE
   3439 		splx(s);
   3440 #endif
   3441 		return;
   3442 	}
   3443 
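         	/*
         	 * The statistics registers read below are hardware event
         	 * counters; they are read-to-clear, so each read picks up
         	 * the events since the previous tick.
         	 */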
   3444 	if (sc->sc_type >= WM_T_82542_2_1) {
   3445 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3446 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3447 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3448 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3449 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3450 	}
   3451 
   3452 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3453 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3454 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3455 	    + CSR_READ(sc, WMREG_CRCERRS)
   3456 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3457 	    + CSR_READ(sc, WMREG_SYMERRC)
   3458 	    + CSR_READ(sc, WMREG_RXERRC)
   3459 	    + CSR_READ(sc, WMREG_SEC)
   3460 	    + CSR_READ(sc, WMREG_CEXTERR)
   3461 	    + CSR_READ(sc, WMREG_RLEC));
   3462 	/*
    3463 	 * WMREG_RNBC is incremented when there are no available buffers in
    3464 	 * host memory. It does not count dropped packets, because the
    3465 	 * Ethernet controller can still receive packets in such a case if
    3466 	 * there is space in the PHY's FIFO.
    3467 	 *
    3468 	 * If you want to track WMREG_RNBC, you should use a dedicated
    3469 	 * EVCNT instead of if_iqdrops.
   3470 	 */
   3471 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3472 	IF_STAT_PUTREF(ifp);
   3473 
   3474 	if (sc->sc_flags & WM_F_HAS_MII)
   3475 		mii_tick(&sc->sc_mii);
   3476 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3477 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3478 		wm_serdes_tick(sc);
   3479 	else
   3480 		wm_tbi_tick(sc);
   3481 
   3482 	WM_CORE_UNLOCK(sc);
   3483 
   3484 	wm_watchdog(ifp);
   3485 
   3486 	callout_schedule(&sc->sc_tick_ch, hz);
   3487 }
   3488 
   3489 static int
   3490 wm_ifflags_cb(struct ethercom *ec)
   3491 {
   3492 	struct ifnet *ifp = &ec->ec_if;
   3493 	struct wm_softc *sc = ifp->if_softc;
   3494 	u_short iffchange;
   3495 	int ecchange;
   3496 	bool needreset = false;
   3497 	int rc = 0;
   3498 
   3499 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3500 		device_xname(sc->sc_dev), __func__));
   3501 
   3502 	WM_CORE_LOCK(sc);
   3503 
   3504 	/*
   3505 	 * Check for if_flags.
   3506 	 * Main usage is to prevent linkdown when opening bpf.
   3507 	 */
   3508 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3509 	sc->sc_if_flags = ifp->if_flags;
   3510 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3511 		needreset = true;
   3512 		goto ec;
   3513 	}
   3514 
   3515 	/* iff related updates */
   3516 	if ((iffchange & IFF_PROMISC) != 0)
   3517 		wm_set_filter(sc);
   3518 
   3519 	wm_set_vlan(sc);
   3520 
   3521 ec:
   3522 	/* Check for ec_capenable. */
   3523 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3524 	sc->sc_ec_capenable = ec->ec_capenable;
   3525 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3526 		needreset = true;
   3527 		goto out;
   3528 	}
   3529 
   3530 	/* ec related updates */
   3531 	wm_set_eee(sc);
   3532 
   3533 out:
   3534 	if (needreset)
   3535 		rc = ENETRESET;
   3536 	WM_CORE_UNLOCK(sc);
   3537 
   3538 	return rc;
   3539 }
   3540 
   3541 static bool
   3542 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3543 {
   3544 
   3545 	switch (sc->sc_phytype) {
   3546 	case WMPHY_82577: /* ihphy */
   3547 	case WMPHY_82578: /* atphy */
   3548 	case WMPHY_82579: /* ihphy */
   3549 	case WMPHY_I217: /* ihphy */
   3550 	case WMPHY_82580: /* ihphy */
   3551 	case WMPHY_I350: /* ihphy */
   3552 		return true;
   3553 	default:
   3554 		return false;
   3555 	}
   3556 }
   3557 
   3558 static void
   3559 wm_set_linkdown_discard(struct wm_softc *sc)
   3560 {
   3561 
   3562 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3563 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3564 
   3565 		mutex_enter(txq->txq_lock);
   3566 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3567 		mutex_exit(txq->txq_lock);
   3568 	}
   3569 }
   3570 
   3571 static void
   3572 wm_clear_linkdown_discard(struct wm_softc *sc)
   3573 {
   3574 
   3575 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3576 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3577 
   3578 		mutex_enter(txq->txq_lock);
   3579 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3580 		mutex_exit(txq->txq_lock);
   3581 	}
   3582 }
   3583 
   3584 /*
   3585  * wm_ioctl:		[ifnet interface function]
   3586  *
   3587  *	Handle control requests from the operator.
   3588  */
   3589 static int
   3590 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3591 {
   3592 	struct wm_softc *sc = ifp->if_softc;
   3593 	struct ifreq *ifr = (struct ifreq *)data;
   3594 	struct ifaddr *ifa = (struct ifaddr *)data;
   3595 	struct sockaddr_dl *sdl;
   3596 	int s, error;
   3597 
   3598 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3599 		device_xname(sc->sc_dev), __func__));
   3600 
   3601 #ifndef WM_MPSAFE
   3602 	s = splnet();
   3603 #endif
   3604 	switch (cmd) {
   3605 	case SIOCSIFMEDIA:
   3606 		WM_CORE_LOCK(sc);
   3607 		/* Flow control requires full-duplex mode. */
   3608 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3609 		    (ifr->ifr_media & IFM_FDX) == 0)
   3610 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3611 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3612 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3613 				/* We can do both TXPAUSE and RXPAUSE. */
   3614 				ifr->ifr_media |=
   3615 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3616 			}
   3617 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3618 		}
   3619 		WM_CORE_UNLOCK(sc);
   3620 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3621 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3622 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3623 				wm_set_linkdown_discard(sc);
   3624 			else
   3625 				wm_clear_linkdown_discard(sc);
   3626 		}
   3627 		break;
   3628 	case SIOCINITIFADDR:
   3629 		WM_CORE_LOCK(sc);
   3630 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3631 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3632 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3633 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3634 			/* Unicast address is the first multicast entry */
   3635 			wm_set_filter(sc);
   3636 			error = 0;
   3637 			WM_CORE_UNLOCK(sc);
   3638 			break;
   3639 		}
   3640 		WM_CORE_UNLOCK(sc);
   3641 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3642 			wm_clear_linkdown_discard(sc);
   3643 		/*FALLTHROUGH*/
   3644 	default:
   3645 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3646 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3647 				wm_clear_linkdown_discard(sc);
   3648 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3649 				wm_set_linkdown_discard(sc);
   3650 			}
   3651 		}
   3652 #ifdef WM_MPSAFE
   3653 		s = splnet();
   3654 #endif
   3655 		/* It may call wm_start, so unlock here */
   3656 		error = ether_ioctl(ifp, cmd, data);
   3657 #ifdef WM_MPSAFE
   3658 		splx(s);
   3659 #endif
   3660 		if (error != ENETRESET)
   3661 			break;
   3662 
   3663 		error = 0;
   3664 
   3665 		if (cmd == SIOCSIFCAP)
   3666 			error = (*ifp->if_init)(ifp);
   3667 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3668 			;
   3669 		else if (ifp->if_flags & IFF_RUNNING) {
   3670 			/*
   3671 			 * Multicast list has changed; set the hardware filter
   3672 			 * accordingly.
   3673 			 */
   3674 			WM_CORE_LOCK(sc);
   3675 			wm_set_filter(sc);
   3676 			WM_CORE_UNLOCK(sc);
   3677 		}
   3678 		break;
   3679 	}
   3680 
   3681 #ifndef WM_MPSAFE
   3682 	splx(s);
   3683 #endif
   3684 	return error;
   3685 }
   3686 
   3687 /* MAC address related */
   3688 
   3689 /*
    3690  * Get the offset of the MAC address and return it.
    3691  * If an error occurs, use offset 0.
   3692  */
   3693 static uint16_t
   3694 wm_check_alt_mac_addr(struct wm_softc *sc)
   3695 {
   3696 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3697 	uint16_t offset = NVM_OFF_MACADDR;
   3698 
   3699 	/* Try to read alternative MAC address pointer */
   3700 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3701 		return 0;
   3702 
    3703 	/* Check whether the pointer is valid. */
   3704 	if ((offset == 0x0000) || (offset == 0xffff))
   3705 		return 0;
   3706 
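         	/* Each LAN function has its own slot in the alternative MAC
         	 * address area. */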
   3707 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3708 	/*
    3709 	 * Check whether the alternative MAC address is valid.
    3710 	 * Some cards have a non-0xffff pointer but don't actually use
    3711 	 * an alternative MAC address.
    3712 	 *
    3713 	 * The test is whether the broadcast bit is set.
   3714 	 */
   3715 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3716 		if (((myea[0] & 0xff) & 0x01) == 0)
   3717 			return offset; /* Found */
   3718 
   3719 	/* Not found */
   3720 	return 0;
   3721 }
   3722 
   3723 static int
   3724 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3725 {
   3726 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3727 	uint16_t offset = NVM_OFF_MACADDR;
   3728 	int do_invert = 0;
   3729 
   3730 	switch (sc->sc_type) {
   3731 	case WM_T_82580:
   3732 	case WM_T_I350:
   3733 	case WM_T_I354:
   3734 		/* EEPROM Top Level Partitioning */
   3735 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3736 		break;
   3737 	case WM_T_82571:
   3738 	case WM_T_82575:
   3739 	case WM_T_82576:
   3740 	case WM_T_80003:
   3741 	case WM_T_I210:
   3742 	case WM_T_I211:
   3743 		offset = wm_check_alt_mac_addr(sc);
   3744 		if (offset == 0)
   3745 			if ((sc->sc_funcid & 0x01) == 1)
   3746 				do_invert = 1;
   3747 		break;
   3748 	default:
   3749 		if ((sc->sc_funcid & 0x01) == 1)
   3750 			do_invert = 1;
   3751 		break;
   3752 	}
   3753 
   3754 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3755 		goto bad;
   3756 
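         	/*
         	 * The address is stored in the NVM as three little-endian
         	 * 16-bit words: word 0 holds bytes 0-1, and so on.
         	 */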
   3757 	enaddr[0] = myea[0] & 0xff;
   3758 	enaddr[1] = myea[0] >> 8;
   3759 	enaddr[2] = myea[1] & 0xff;
   3760 	enaddr[3] = myea[1] >> 8;
   3761 	enaddr[4] = myea[2] & 0xff;
   3762 	enaddr[5] = myea[2] >> 8;
   3763 
   3764 	/*
   3765 	 * Toggle the LSB of the MAC address on the second port
   3766 	 * of some dual port cards.
   3767 	 */
   3768 	if (do_invert != 0)
   3769 		enaddr[5] ^= 1;
   3770 
   3771 	return 0;
   3772 
   3773  bad:
   3774 	return -1;
   3775 }
   3776 
   3777 /*
   3778  * wm_set_ral:
   3779  *
   3780  *	Set an entery in the receive address list.
    3781  *	Set an entry in the receive address list.
   3782 static void
   3783 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3784 {
   3785 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3786 	uint32_t wlock_mac;
   3787 	int rv;
   3788 
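         	/*
         	 * Pack the address into the RAL/RAH register pair: bytes 0-3
         	 * go into RAL, bytes 4-5 into the low half of RAH, and RAL_AV
         	 * marks the entry as valid.
         	 */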
   3789 	if (enaddr != NULL) {
   3790 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3791 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3792 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3793 		ral_hi |= RAL_AV;
   3794 	} else {
   3795 		ral_lo = 0;
   3796 		ral_hi = 0;
   3797 	}
   3798 
   3799 	switch (sc->sc_type) {
   3800 	case WM_T_82542_2_0:
   3801 	case WM_T_82542_2_1:
   3802 	case WM_T_82543:
   3803 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3804 		CSR_WRITE_FLUSH(sc);
   3805 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3806 		CSR_WRITE_FLUSH(sc);
   3807 		break;
   3808 	case WM_T_PCH2:
   3809 	case WM_T_PCH_LPT:
   3810 	case WM_T_PCH_SPT:
   3811 	case WM_T_PCH_CNP:
   3812 		if (idx == 0) {
   3813 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3814 			CSR_WRITE_FLUSH(sc);
   3815 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3816 			CSR_WRITE_FLUSH(sc);
   3817 			return;
   3818 		}
   3819 		if (sc->sc_type != WM_T_PCH2) {
   3820 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3821 			    FWSM_WLOCK_MAC);
   3822 			addrl = WMREG_SHRAL(idx - 1);
   3823 			addrh = WMREG_SHRAH(idx - 1);
   3824 		} else {
   3825 			wlock_mac = 0;
   3826 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3827 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3828 		}
   3829 
   3830 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3831 			rv = wm_get_swflag_ich8lan(sc);
   3832 			if (rv != 0)
   3833 				return;
   3834 			CSR_WRITE(sc, addrl, ral_lo);
   3835 			CSR_WRITE_FLUSH(sc);
   3836 			CSR_WRITE(sc, addrh, ral_hi);
   3837 			CSR_WRITE_FLUSH(sc);
   3838 			wm_put_swflag_ich8lan(sc);
   3839 		}
   3840 
   3841 		break;
   3842 	default:
   3843 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3844 		CSR_WRITE_FLUSH(sc);
   3845 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3846 		CSR_WRITE_FLUSH(sc);
   3847 		break;
   3848 	}
   3849 }
   3850 
   3851 /*
   3852  * wm_mchash:
   3853  *
   3854  *	Compute the hash of the multicast address for the 4096-bit
   3855  *	multicast filter.
   3856  */
   3857 static uint32_t
   3858 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3859 {
   3860 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3861 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3862 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3863 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3864 	uint32_t hash;
   3865 
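         	/*
         	 * The hash is built from bits of address bytes 4 and 5;
         	 * sc_mchash_type selects the bit alignment.  ICH/PCH chips
         	 * yield a 10-bit hash, all others a 12-bit one.
         	 */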
   3866 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3867 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3868 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3869 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3870 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3871 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3872 		return (hash & 0x3ff);
   3873 	}
   3874 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3875 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3876 
   3877 	return (hash & 0xfff);
   3878 }
   3879 
   3880 /*
    3881  * wm_rar_count:
    3882  *	Return the number of entries in the receive address list.
   3883  */
   3884 static int
   3885 wm_rar_count(struct wm_softc *sc)
   3886 {
   3887 	int size;
   3888 
   3889 	switch (sc->sc_type) {
   3890 	case WM_T_ICH8:
    3891 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3892 		break;
   3893 	case WM_T_ICH9:
   3894 	case WM_T_ICH10:
   3895 	case WM_T_PCH:
   3896 		size = WM_RAL_TABSIZE_ICH8;
   3897 		break;
   3898 	case WM_T_PCH2:
   3899 		size = WM_RAL_TABSIZE_PCH2;
   3900 		break;
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		size = WM_RAL_TABSIZE_PCH_LPT;
   3905 		break;
   3906 	case WM_T_82575:
   3907 	case WM_T_I210:
   3908 	case WM_T_I211:
   3909 		size = WM_RAL_TABSIZE_82575;
   3910 		break;
   3911 	case WM_T_82576:
   3912 	case WM_T_82580:
   3913 		size = WM_RAL_TABSIZE_82576;
   3914 		break;
   3915 	case WM_T_I350:
   3916 	case WM_T_I354:
   3917 		size = WM_RAL_TABSIZE_I350;
   3918 		break;
   3919 	default:
   3920 		size = WM_RAL_TABSIZE;
   3921 	}
   3922 
   3923 	return size;
   3924 }
   3925 
   3926 /*
   3927  * wm_set_filter:
   3928  *
   3929  *	Set up the receive filter.
   3930  */
   3931 static void
   3932 wm_set_filter(struct wm_softc *sc)
   3933 {
   3934 	struct ethercom *ec = &sc->sc_ethercom;
   3935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3936 	struct ether_multi *enm;
   3937 	struct ether_multistep step;
   3938 	bus_addr_t mta_reg;
   3939 	uint32_t hash, reg, bit;
   3940 	int i, size, ralmax, rv;
   3941 
   3942 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 
   3945 	if (sc->sc_type >= WM_T_82544)
   3946 		mta_reg = WMREG_CORDOVA_MTA;
   3947 	else
   3948 		mta_reg = WMREG_MTA;
   3949 
   3950 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3951 
   3952 	if (ifp->if_flags & IFF_BROADCAST)
   3953 		sc->sc_rctl |= RCTL_BAM;
   3954 	if (ifp->if_flags & IFF_PROMISC) {
   3955 		sc->sc_rctl |= RCTL_UPE;
   3956 		ETHER_LOCK(ec);
   3957 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3958 		ETHER_UNLOCK(ec);
   3959 		goto allmulti;
   3960 	}
   3961 
   3962 	/*
   3963 	 * Set the station address in the first RAL slot, and
   3964 	 * clear the remaining slots.
   3965 	 */
   3966 	size = wm_rar_count(sc);
   3967 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3968 
   3969 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3970 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3971 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3972 		switch (i) {
   3973 		case 0:
   3974 			/* We can use all entries */
   3975 			ralmax = size;
   3976 			break;
   3977 		case 1:
   3978 			/* Only RAR[0] */
   3979 			ralmax = 1;
   3980 			break;
   3981 		default:
   3982 			/* Available SHRA + RAR[0] */
   3983 			ralmax = i + 1;
   3984 		}
   3985 	} else
   3986 		ralmax = size;
   3987 	for (i = 1; i < size; i++) {
   3988 		if (i < ralmax)
   3989 			wm_set_ral(sc, NULL, i);
   3990 	}
   3991 
   3992 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3993 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3994 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3995 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3996 		size = WM_ICH8_MC_TABSIZE;
   3997 	else
   3998 		size = WM_MC_TABSIZE;
   3999 	/* Clear out the multicast table. */
   4000 	for (i = 0; i < size; i++) {
   4001 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4002 		CSR_WRITE_FLUSH(sc);
   4003 	}
   4004 
   4005 	ETHER_LOCK(ec);
   4006 	ETHER_FIRST_MULTI(step, ec, enm);
   4007 	while (enm != NULL) {
   4008 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4009 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4010 			ETHER_UNLOCK(ec);
   4011 			/*
   4012 			 * We must listen to a range of multicast addresses.
   4013 			 * For now, just accept all multicasts, rather than
   4014 			 * trying to set only those filter bits needed to match
   4015 			 * the range.  (At this time, the only use of address
   4016 			 * ranges is for IP multicast routing, for which the
   4017 			 * range is big enough to require all bits set.)
   4018 			 */
   4019 			goto allmulti;
   4020 		}
   4021 
   4022 		hash = wm_mchash(sc, enm->enm_addrlo);
   4023 
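         		/*
         		 * The hash selects one bit in the multicast table array:
         		 * the upper bits pick a 32-bit MTA register (32 registers
         		 * on ICH/PCH, 128 elsewhere) and the low 5 bits pick the
         		 * bit within that register.
         		 */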
   4024 		reg = (hash >> 5);
   4025 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4026 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4027 		    || (sc->sc_type == WM_T_PCH2)
   4028 		    || (sc->sc_type == WM_T_PCH_LPT)
   4029 		    || (sc->sc_type == WM_T_PCH_SPT)
   4030 		    || (sc->sc_type == WM_T_PCH_CNP))
   4031 			reg &= 0x1f;
   4032 		else
   4033 			reg &= 0x7f;
   4034 		bit = hash & 0x1f;
   4035 
   4036 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4037 		hash |= 1U << bit;
   4038 
   4039 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4040 			/*
    4041 			 * 82544 Errata 9: Certain registers cannot be written
   4042 			 * with particular alignments in PCI-X bus operation
   4043 			 * (FCAH, MTA and VFTA).
   4044 			 */
   4045 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4046 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4047 			CSR_WRITE_FLUSH(sc);
   4048 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4049 			CSR_WRITE_FLUSH(sc);
   4050 		} else {
   4051 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4052 			CSR_WRITE_FLUSH(sc);
   4053 		}
   4054 
   4055 		ETHER_NEXT_MULTI(step, enm);
   4056 	}
   4057 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4058 	ETHER_UNLOCK(ec);
   4059 
   4060 	goto setit;
   4061 
   4062  allmulti:
   4063 	sc->sc_rctl |= RCTL_MPE;
   4064 
   4065  setit:
   4066 	if (sc->sc_type >= WM_T_PCH2) {
   4067 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4068 		    && (ifp->if_mtu > ETHERMTU))
   4069 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4070 		else
   4071 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4072 		if (rv != 0)
   4073 			device_printf(sc->sc_dev,
   4074 			    "Failed to do workaround for jumbo frame.\n");
   4075 	}
   4076 
   4077 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4078 }
   4079 
   4080 /* Reset and init related */
   4081 
   4082 static void
   4083 wm_set_vlan(struct wm_softc *sc)
   4084 {
   4085 
   4086 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4087 		device_xname(sc->sc_dev), __func__));
   4088 
   4089 	/* Deal with VLAN enables. */
   4090 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4091 		sc->sc_ctrl |= CTRL_VME;
   4092 	else
   4093 		sc->sc_ctrl &= ~CTRL_VME;
   4094 
   4095 	/* Write the control registers. */
   4096 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4097 }
   4098 
   4099 static void
   4100 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4101 {
   4102 	uint32_t gcr;
   4103 	pcireg_t ctrl2;
   4104 
   4105 	gcr = CSR_READ(sc, WMREG_GCR);
   4106 
   4107 	/* Only take action if timeout value is defaulted to 0 */
   4108 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4109 		goto out;
   4110 
   4111 	if ((gcr & GCR_CAP_VER2) == 0) {
   4112 		gcr |= GCR_CMPL_TMOUT_10MS;
   4113 		goto out;
   4114 	}
   4115 
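         	/*
         	 * Capability version 2 devices carry the completion timeout
         	 * in the PCIe Device Control 2 register instead of GCR.
         	 */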
   4116 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4117 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4118 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4119 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4120 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4121 
   4122 out:
   4123 	/* Disable completion timeout resend */
   4124 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4125 
   4126 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4127 }
   4128 
   4129 void
   4130 wm_get_auto_rd_done(struct wm_softc *sc)
   4131 {
   4132 	int i;
   4133 
    4134 	/* Wait for eeprom to reload */
   4135 	switch (sc->sc_type) {
   4136 	case WM_T_82571:
   4137 	case WM_T_82572:
   4138 	case WM_T_82573:
   4139 	case WM_T_82574:
   4140 	case WM_T_82583:
   4141 	case WM_T_82575:
   4142 	case WM_T_82576:
   4143 	case WM_T_82580:
   4144 	case WM_T_I350:
   4145 	case WM_T_I354:
   4146 	case WM_T_I210:
   4147 	case WM_T_I211:
   4148 	case WM_T_80003:
   4149 	case WM_T_ICH8:
   4150 	case WM_T_ICH9:
   4151 		for (i = 0; i < 10; i++) {
   4152 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4153 				break;
   4154 			delay(1000);
   4155 		}
   4156 		if (i == 10) {
   4157 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4158 			    "complete\n", device_xname(sc->sc_dev));
   4159 		}
   4160 		break;
   4161 	default:
   4162 		break;
   4163 	}
   4164 }
   4165 
   4166 void
   4167 wm_lan_init_done(struct wm_softc *sc)
   4168 {
   4169 	uint32_t reg = 0;
   4170 	int i;
   4171 
   4172 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4173 		device_xname(sc->sc_dev), __func__));
   4174 
   4175 	/* Wait for eeprom to reload */
   4176 	switch (sc->sc_type) {
   4177 	case WM_T_ICH10:
   4178 	case WM_T_PCH:
   4179 	case WM_T_PCH2:
   4180 	case WM_T_PCH_LPT:
   4181 	case WM_T_PCH_SPT:
   4182 	case WM_T_PCH_CNP:
   4183 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4184 			reg = CSR_READ(sc, WMREG_STATUS);
   4185 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4186 				break;
   4187 			delay(100);
   4188 		}
   4189 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4190 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4191 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4192 		}
   4193 		break;
   4194 	default:
   4195 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4196 		    __func__);
   4197 		break;
   4198 	}
   4199 
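         	/* Acknowledge completion by clearing the LAN_INIT_DONE bit. */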
   4200 	reg &= ~STATUS_LAN_INIT_DONE;
   4201 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4202 }
   4203 
   4204 void
   4205 wm_get_cfg_done(struct wm_softc *sc)
   4206 {
   4207 	int mask;
   4208 	uint32_t reg;
   4209 	int i;
   4210 
   4211 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4212 		device_xname(sc->sc_dev), __func__));
   4213 
   4214 	/* Wait for eeprom to reload */
   4215 	switch (sc->sc_type) {
   4216 	case WM_T_82542_2_0:
   4217 	case WM_T_82542_2_1:
   4218 		/* null */
   4219 		break;
   4220 	case WM_T_82543:
   4221 	case WM_T_82544:
   4222 	case WM_T_82540:
   4223 	case WM_T_82545:
   4224 	case WM_T_82545_3:
   4225 	case WM_T_82546:
   4226 	case WM_T_82546_3:
   4227 	case WM_T_82541:
   4228 	case WM_T_82541_2:
   4229 	case WM_T_82547:
   4230 	case WM_T_82547_2:
   4231 	case WM_T_82573:
   4232 	case WM_T_82574:
   4233 	case WM_T_82583:
   4234 		/* generic */
   4235 		delay(10*1000);
   4236 		break;
   4237 	case WM_T_80003:
   4238 	case WM_T_82571:
   4239 	case WM_T_82572:
   4240 	case WM_T_82575:
   4241 	case WM_T_82576:
   4242 	case WM_T_82580:
   4243 	case WM_T_I350:
   4244 	case WM_T_I354:
   4245 	case WM_T_I210:
   4246 	case WM_T_I211:
   4247 		if (sc->sc_type == WM_T_82571) {
   4248 			/* Only 82571 shares port 0 */
   4249 			mask = EEMNGCTL_CFGDONE_0;
   4250 		} else
   4251 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4252 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4253 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4254 				break;
   4255 			delay(1000);
   4256 		}
   4257 		if (i >= WM_PHY_CFG_TIMEOUT)
   4258 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4259 				device_xname(sc->sc_dev), __func__));
   4260 		break;
   4261 	case WM_T_ICH8:
   4262 	case WM_T_ICH9:
   4263 	case WM_T_ICH10:
   4264 	case WM_T_PCH:
   4265 	case WM_T_PCH2:
   4266 	case WM_T_PCH_LPT:
   4267 	case WM_T_PCH_SPT:
   4268 	case WM_T_PCH_CNP:
   4269 		delay(10*1000);
   4270 		if (sc->sc_type >= WM_T_ICH10)
   4271 			wm_lan_init_done(sc);
   4272 		else
   4273 			wm_get_auto_rd_done(sc);
   4274 
   4275 		/* Clear PHY Reset Asserted bit */
   4276 		reg = CSR_READ(sc, WMREG_STATUS);
   4277 		if ((reg & STATUS_PHYRA) != 0)
   4278 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4279 		break;
   4280 	default:
   4281 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4282 		    __func__);
   4283 		break;
   4284 	}
   4285 }
   4286 
   4287 int
   4288 wm_phy_post_reset(struct wm_softc *sc)
   4289 {
   4290 	device_t dev = sc->sc_dev;
   4291 	uint16_t reg;
   4292 	int rv = 0;
   4293 
   4294 	/* This function is only for ICH8 and newer. */
   4295 	if (sc->sc_type < WM_T_ICH8)
   4296 		return 0;
   4297 
   4298 	if (wm_phy_resetisblocked(sc)) {
   4299 		/* XXX */
   4300 		device_printf(dev, "PHY is blocked\n");
   4301 		return -1;
   4302 	}
   4303 
   4304 	/* Allow time for h/w to get to quiescent state after reset */
   4305 	delay(10*1000);
   4306 
   4307 	/* Perform any necessary post-reset workarounds */
   4308 	if (sc->sc_type == WM_T_PCH)
   4309 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4310 	else if (sc->sc_type == WM_T_PCH2)
   4311 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4312 	if (rv != 0)
   4313 		return rv;
   4314 
   4315 	/* Clear the host wakeup bit after lcd reset */
   4316 	if (sc->sc_type >= WM_T_PCH) {
   4317 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4318 		reg &= ~BM_WUC_HOST_WU_BIT;
   4319 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4320 	}
   4321 
   4322 	/* Configure the LCD with the extended configuration region in NVM */
   4323 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4324 		return rv;
   4325 
   4326 	/* Configure the LCD with the OEM bits in NVM */
   4327 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4328 
   4329 	if (sc->sc_type == WM_T_PCH2) {
   4330 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4331 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4332 			delay(10 * 1000);
   4333 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4334 		}
   4335 		/* Set EEE LPI Update Timer to 200usec */
   4336 		rv = sc->phy.acquire(sc);
   4337 		if (rv)
   4338 			return rv;
   4339 		rv = wm_write_emi_reg_locked(dev,
   4340 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4341 		sc->phy.release(sc);
   4342 	}
   4343 
   4344 	return rv;
   4345 }
   4346 
   4347 /* Only for PCH and newer */
   4348 static int
   4349 wm_write_smbus_addr(struct wm_softc *sc)
   4350 {
   4351 	uint32_t strap, freq;
   4352 	uint16_t phy_data;
   4353 	int rv;
   4354 
   4355 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4356 		device_xname(sc->sc_dev), __func__));
   4357 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4358 
   4359 	strap = CSR_READ(sc, WMREG_STRAP);
   4360 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4361 
   4362 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4363 	if (rv != 0)
   4364 		return -1;
   4365 
   4366 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4367 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4368 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4369 
   4370 	if (sc->sc_phytype == WMPHY_I217) {
   4371 		/* Restore SMBus frequency */
    4372 		if (freq--) {
   4373 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4374 			    | HV_SMB_ADDR_FREQ_HIGH);
   4375 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4376 			    HV_SMB_ADDR_FREQ_LOW);
   4377 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4378 			    HV_SMB_ADDR_FREQ_HIGH);
   4379 		} else
   4380 			DPRINTF(sc, WM_DEBUG_INIT,
   4381 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4382 				device_xname(sc->sc_dev), __func__));
   4383 	}
   4384 
   4385 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4386 	    phy_data);
   4387 }
   4388 
   4389 static int
   4390 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4391 {
   4392 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4393 	uint16_t phy_page = 0;
   4394 	int rv = 0;
   4395 
   4396 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4397 		device_xname(sc->sc_dev), __func__));
   4398 
   4399 	switch (sc->sc_type) {
   4400 	case WM_T_ICH8:
   4401 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4402 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4403 			return 0;
   4404 
   4405 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4406 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4407 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4408 			break;
   4409 		}
   4410 		/* FALLTHROUGH */
   4411 	case WM_T_PCH:
   4412 	case WM_T_PCH2:
   4413 	case WM_T_PCH_LPT:
   4414 	case WM_T_PCH_SPT:
   4415 	case WM_T_PCH_CNP:
   4416 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4417 		break;
   4418 	default:
   4419 		return 0;
   4420 	}
   4421 
   4422 	if ((rv = sc->phy.acquire(sc)) != 0)
   4423 		return rv;
   4424 
   4425 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4426 	if ((reg & sw_cfg_mask) == 0)
   4427 		goto release;
   4428 
   4429 	/*
   4430 	 * Make sure HW does not configure LCD from PHY extended configuration
   4431 	 * before SW configuration
   4432 	 */
   4433 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4434 	if ((sc->sc_type < WM_T_PCH2)
   4435 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4436 		goto release;
   4437 
   4438 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4439 		device_xname(sc->sc_dev), __func__));
   4440 	/* The extended config pointer is in DWORDs; convert to a word address */
   4441 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4442 
   4443 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4444 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4445 	if (cnf_size == 0)
   4446 		goto release;
   4447 
   4448 	if (((sc->sc_type == WM_T_PCH)
   4449 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4450 	    || (sc->sc_type > WM_T_PCH)) {
   4451 		/*
   4452 		 * HW configures the SMBus address and LEDs when the OEM and
   4453 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4454 		 * are cleared, SW will configure them instead.
   4455 		 */
   4456 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4457 			device_xname(sc->sc_dev), __func__));
   4458 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4459 			goto release;
   4460 
   4461 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4462 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4463 		    (uint16_t)reg);
   4464 		if (rv != 0)
   4465 			goto release;
   4466 	}
   4467 
   4468 	/* Configure LCD from extended configuration region. */
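	/*
	 * Each entry in that region is a (data, address) word pair, which
	 * is why the reads below step by two NVM words per iteration and
	 * why cnf_size counts pairs rather than single words.
	 */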
   4469 	for (i = 0; i < cnf_size; i++) {
   4470 		uint16_t reg_data, reg_addr;
   4471 
   4472 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4473 			goto release;
   4474 
   4475 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4476 			goto release;
   4477 
   4478 		if (reg_addr == IGPHY_PAGE_SELECT)
   4479 			phy_page = reg_data;
   4480 
   4481 		reg_addr &= IGPHY_MAXREGADDR;
   4482 		reg_addr |= phy_page;
   4483 
   4484 		KASSERT(sc->phy.writereg_locked != NULL);
   4485 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4486 		    reg_data);
   4487 	}
   4488 
   4489 release:
   4490 	sc->phy.release(sc);
   4491 	return rv;
   4492 }
   4493 
   4494 /*
   4495  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4496  *  @sc:       pointer to the HW structure
   4497  *  @d0_state: true if entering the D0 state, false if entering D3
   4498  *
   4499  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4500  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4501  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4502  */
   4503 int
   4504 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4505 {
   4506 	uint32_t mac_reg;
   4507 	uint16_t oem_reg;
   4508 	int rv;
   4509 
   4510 	if (sc->sc_type < WM_T_PCH)
   4511 		return 0;
   4512 
   4513 	rv = sc->phy.acquire(sc);
   4514 	if (rv != 0)
   4515 		return rv;
   4516 
   4517 	if (sc->sc_type == WM_T_PCH) {
   4518 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4519 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4520 			goto release;
   4521 	}
   4522 
   4523 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4524 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4525 		goto release;
   4526 
   4527 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4528 
   4529 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4530 	if (rv != 0)
   4531 		goto release;
   4532 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4533 
   4534 	if (d0_state) {
   4535 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4536 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4537 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4538 			oem_reg |= HV_OEM_BITS_LPLU;
   4539 	} else {
   4540 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4541 		    != 0)
   4542 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4543 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4544 		    != 0)
   4545 			oem_reg |= HV_OEM_BITS_LPLU;
   4546 	}
   4547 
   4548 	/* Set Restart auto-neg to activate the bits */
   4549 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4550 	    && (wm_phy_resetisblocked(sc) == false))
   4551 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4552 
   4553 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4554 
   4555 release:
   4556 	sc->phy.release(sc);
   4557 
   4558 	return rv;
   4559 }
   4560 
   4561 /* Init hardware bits */
   4562 void
   4563 wm_initialize_hardware_bits(struct wm_softc *sc)
   4564 {
   4565 	uint32_t tarc0, tarc1, reg;
   4566 
   4567 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4568 		device_xname(sc->sc_dev), __func__));
   4569 
   4570 	/* For 82571 variant, 80003 and ICHs */
   4571 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4572 	    || (sc->sc_type >= WM_T_80003)) {
   4573 
   4574 		/* Transmit Descriptor Control 0 */
   4575 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4576 		reg |= TXDCTL_COUNT_DESC;
   4577 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4578 
   4579 		/* Transmit Descriptor Control 1 */
   4580 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4581 		reg |= TXDCTL_COUNT_DESC;
   4582 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4583 
   4584 		/* TARC0 */
   4585 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4586 		switch (sc->sc_type) {
   4587 		case WM_T_82571:
   4588 		case WM_T_82572:
   4589 		case WM_T_82573:
   4590 		case WM_T_82574:
   4591 		case WM_T_82583:
   4592 		case WM_T_80003:
   4593 			/* Clear bits 30..27 */
   4594 			tarc0 &= ~__BITS(30, 27);
   4595 			break;
   4596 		default:
   4597 			break;
   4598 		}
   4599 
   4600 		switch (sc->sc_type) {
   4601 		case WM_T_82571:
   4602 		case WM_T_82572:
   4603 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4604 
   4605 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4606 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4607 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4608 			/* 8257[12] Errata No.7 */
   4609 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4610 
   4611 			/* TARC1 bit 28 */
   4612 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4613 				tarc1 &= ~__BIT(28);
   4614 			else
   4615 				tarc1 |= __BIT(28);
   4616 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4617 
   4618 			/*
   4619 			 * 8257[12] Errata No.13
   4620 			 * Disable Dynamic Clock Gating.
   4621 			 */
   4622 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4623 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4624 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4625 			break;
   4626 		case WM_T_82573:
   4627 		case WM_T_82574:
   4628 		case WM_T_82583:
   4629 			if ((sc->sc_type == WM_T_82574)
   4630 			    || (sc->sc_type == WM_T_82583))
   4631 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4632 
   4633 			/* Extended Device Control */
   4634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4635 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4636 			reg |= __BIT(22);	/* Set bit 22 */
   4637 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4638 
   4639 			/* Device Control */
   4640 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4641 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4642 
   4643 			/* PCIe Control Register */
   4644 			/*
   4645 			 * 82573 Errata (unknown).
   4646 			 *
   4647 			 * 82574 Errata 25 and 82583 Errata 12
   4648 			 * "Dropped Rx Packets":
   4649 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4650 			 */
   4651 			reg = CSR_READ(sc, WMREG_GCR);
   4652 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4653 			CSR_WRITE(sc, WMREG_GCR, reg);
   4654 
   4655 			if ((sc->sc_type == WM_T_82574)
   4656 			    || (sc->sc_type == WM_T_82583)) {
   4657 				/*
   4658 				 * Document says this bit must be set for
   4659 				 * proper operation.
   4660 				 */
   4661 				reg = CSR_READ(sc, WMREG_GCR);
   4662 				reg |= __BIT(22);
   4663 				CSR_WRITE(sc, WMREG_GCR, reg);
   4664 
   4665 				/*
   4666 				 * Apply a workaround for a hardware
   4667 				 * erratum documented in the errata docs:
   4668 				 * some error prone or unreliable PCIe
   4669 				 * completions occur, particularly with
   4670 				 * ASPM enabled. Without the fix, the
   4671 				 * issue can cause Tx timeouts.
   4672 				 */
   4673 				reg = CSR_READ(sc, WMREG_GCR2);
   4674 				reg |= __BIT(0);
   4675 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4676 			}
   4677 			break;
   4678 		case WM_T_80003:
   4679 			/* TARC0 */
   4680 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4681 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4682 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4683 
   4684 			/* TARC1 bit 28 */
   4685 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4686 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4687 				tarc1 &= ~__BIT(28);
   4688 			else
   4689 				tarc1 |= __BIT(28);
   4690 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4691 			break;
   4692 		case WM_T_ICH8:
   4693 		case WM_T_ICH9:
   4694 		case WM_T_ICH10:
   4695 		case WM_T_PCH:
   4696 		case WM_T_PCH2:
   4697 		case WM_T_PCH_LPT:
   4698 		case WM_T_PCH_SPT:
   4699 		case WM_T_PCH_CNP:
   4700 			/* TARC0 */
   4701 			if (sc->sc_type == WM_T_ICH8) {
   4702 				/* Set TARC0 bits 29 and 28 */
   4703 				tarc0 |= __BITS(29, 28);
   4704 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4705 				tarc0 |= __BIT(29);
   4706 				/*
   4707 				 * Drop bit 28. From Linux.
   4708 				 * See I218/I219 spec update
   4709 				 * "5. Buffer Overrun While the I219 is
   4710 				 * Processing DMA Transactions"
   4711 				 */
   4712 				tarc0 &= ~__BIT(28);
   4713 			}
   4714 			/* Set TARC0 bits 23,24,26,27 */
   4715 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4716 
   4717 			/* CTRL_EXT */
   4718 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4719 			reg |= __BIT(22);	/* Set bit 22 */
   4720 			/*
   4721 			 * Enable PHY low-power state when MAC is at D3
   4722 			 * w/o WoL
   4723 			 */
   4724 			if (sc->sc_type >= WM_T_PCH)
   4725 				reg |= CTRL_EXT_PHYPDEN;
   4726 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4727 
   4728 			/* TARC1 */
   4729 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4730 			/* bit 28 */
   4731 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4732 				tarc1 &= ~__BIT(28);
   4733 			else
   4734 				tarc1 |= __BIT(28);
   4735 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4736 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4737 
   4738 			/* Device Status */
   4739 			if (sc->sc_type == WM_T_ICH8) {
   4740 				reg = CSR_READ(sc, WMREG_STATUS);
   4741 				reg &= ~__BIT(31);
   4742 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4744 			}
   4745 
   4746 			/* IOSFPC */
   4747 			if (sc->sc_type == WM_T_PCH_SPT) {
   4748 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4749 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4750 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4751 			}
   4752 			/*
   4753 			 * Work-around descriptor data corruption issue during
   4754 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4755 			 * capability.
   4756 			 */
   4757 			reg = CSR_READ(sc, WMREG_RFCTL);
   4758 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4759 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4760 			break;
   4761 		default:
   4762 			break;
   4763 		}
   4764 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4765 
   4766 		switch (sc->sc_type) {
   4767 		/*
   4768 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4769 		 * Avoid RSS Hash Value bug.
   4770 		 */
   4771 		case WM_T_82571:
   4772 		case WM_T_82572:
   4773 		case WM_T_82573:
   4774 		case WM_T_80003:
   4775 		case WM_T_ICH8:
   4776 			reg = CSR_READ(sc, WMREG_RFCTL);
   4777 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4778 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4779 			break;
   4780 		case WM_T_82574:
   4781 			/* Use extended Rx descriptors. */
   4782 			reg = CSR_READ(sc, WMREG_RFCTL);
   4783 			reg |= WMREG_RFCTL_EXSTEN;
   4784 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4785 			break;
   4786 		default:
   4787 			break;
   4788 		}
   4789 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4790 		/*
   4791 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4792 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4793 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4794 		 * Correctly by the Device"
   4795 		 *
   4796 		 * I354(C2000) Errata AVR53:
   4797 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4798 		 * Hang"
   4799 		 */
   4800 		reg = CSR_READ(sc, WMREG_RFCTL);
   4801 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4802 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4803 	}
   4804 }
   4805 
   4806 static uint32_t
   4807 wm_rxpbs_adjust_82580(uint32_t val)
   4808 {
   4809 	uint32_t rv = 0;
   4810 
   4811 	if (val < __arraycount(wm_82580_rxpbs_table))
   4812 		rv = wm_82580_rxpbs_table[val];
   4813 
   4814 	return rv;
   4815 }
   4816 
   4817 /*
   4818  * wm_reset_phy:
   4819  *
   4820  *	generic PHY reset function.
   4821  *	Same as e1000_phy_hw_reset_generic()
   4822  */
   4823 static int
   4824 wm_reset_phy(struct wm_softc *sc)
   4825 {
   4826 	uint32_t reg;
   4827 
   4828 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4829 		device_xname(sc->sc_dev), __func__));
   4830 	if (wm_phy_resetisblocked(sc))
   4831 		return -1;
   4832 
   4833 	sc->phy.acquire(sc);
   4834 
   4835 	reg = CSR_READ(sc, WMREG_CTRL);
   4836 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4837 	CSR_WRITE_FLUSH(sc);
   4838 
   4839 	delay(sc->phy.reset_delay_us);
   4840 
   4841 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4842 	CSR_WRITE_FLUSH(sc);
   4843 
   4844 	delay(150);
   4845 
   4846 	sc->phy.release(sc);
   4847 
   4848 	wm_get_cfg_done(sc);
   4849 	wm_phy_post_reset(sc);
   4850 
   4851 	return 0;
   4852 }
   4853 
   4854 /*
   4855  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4856  * so it is enough to check sc->sc_queue[0] only.
   4857  */
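/*
 * A sketch of the flush below, as read from the code: queue one dummy
 * 512-byte descriptor and bump TDT so the hardware drains any DMA
 * transaction it still has pending; only if the flush request is still
 * asserted afterwards is the Rx ring briefly re-enabled with adjusted
 * thresholds.
 */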
   4858 static void
   4859 wm_flush_desc_rings(struct wm_softc *sc)
   4860 {
   4861 	pcireg_t preg;
   4862 	uint32_t reg;
   4863 	struct wm_txqueue *txq;
   4864 	wiseman_txdesc_t *txd;
   4865 	int nexttx;
   4866 	uint32_t rctl;
   4867 
   4868 	/* First, disable MULR fix in FEXTNVM11 */
   4869 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4870 	reg |= FEXTNVM11_DIS_MULRFIX;
   4871 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4872 
   4873 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4874 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4875 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4876 		return;
   4877 
   4878 	/* TX */
   4879 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4880 	    preg, reg);
   4881 	reg = CSR_READ(sc, WMREG_TCTL);
   4882 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4883 
   4884 	txq = &sc->sc_queue[0].wmq_txq;
   4885 	nexttx = txq->txq_next;
   4886 	txd = &txq->txq_descs[nexttx];
   4887 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4888 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4889 	txd->wtx_fields.wtxu_status = 0;
   4890 	txd->wtx_fields.wtxu_options = 0;
   4891 	txd->wtx_fields.wtxu_vlan = 0;
   4892 
   4893 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4894 	    BUS_SPACE_BARRIER_WRITE);
   4895 
   4896 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4897 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4898 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4899 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4900 	delay(250);
   4901 
   4902 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4903 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4904 		return;
   4905 
   4906 	/* RX */
   4907 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4908 	rctl = CSR_READ(sc, WMREG_RCTL);
   4909 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4910 	CSR_WRITE_FLUSH(sc);
   4911 	delay(150);
   4912 
   4913 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4914 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4915 	reg &= 0xffffc000;
   4916 	/*
   4917 	 * Update thresholds: prefetch threshold to 31, host threshold
   4918 	 * to 1 and make sure the granularity is "descriptors" and not
   4919 	 * "cache lines"
   4920 	 */
   4921 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4922 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4923 
   4924 	/* Momentarily enable the RX ring for the changes to take effect */
   4925 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4926 	CSR_WRITE_FLUSH(sc);
   4927 	delay(150);
   4928 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4929 }
   4930 
   4931 /*
   4932  * wm_reset:
   4933  *
   4934  *	Reset the i82542 chip.
   4935  */
   4936 static void
   4937 wm_reset(struct wm_softc *sc)
   4938 {
   4939 	int phy_reset = 0;
   4940 	int i, error = 0;
   4941 	uint32_t reg;
   4942 	uint16_t kmreg;
   4943 	int rv;
   4944 
   4945 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4946 		device_xname(sc->sc_dev), __func__));
   4947 	KASSERT(sc->sc_type != 0);
   4948 
   4949 	/*
   4950 	 * Allocate on-chip memory according to the MTU size.
   4951 	 * The Packet Buffer Allocation register must be written
   4952 	 * before the chip is reset.
   4953 	 */
   4954 	switch (sc->sc_type) {
   4955 	case WM_T_82547:
   4956 	case WM_T_82547_2:
   4957 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4958 		    PBA_22K : PBA_30K;
   4959 		for (i = 0; i < sc->sc_nqueues; i++) {
   4960 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4961 			txq->txq_fifo_head = 0;
   4962 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4963 			txq->txq_fifo_size =
   4964 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4965 			txq->txq_fifo_stall = 0;
   4966 		}
   4967 		break;
   4968 	case WM_T_82571:
   4969 	case WM_T_82572:
   4970 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4971 	case WM_T_80003:
   4972 		sc->sc_pba = PBA_32K;
   4973 		break;
   4974 	case WM_T_82573:
   4975 		sc->sc_pba = PBA_12K;
   4976 		break;
   4977 	case WM_T_82574:
   4978 	case WM_T_82583:
   4979 		sc->sc_pba = PBA_20K;
   4980 		break;
   4981 	case WM_T_82576:
   4982 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4983 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4984 		break;
   4985 	case WM_T_82580:
   4986 	case WM_T_I350:
   4987 	case WM_T_I354:
   4988 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4989 		break;
   4990 	case WM_T_I210:
   4991 	case WM_T_I211:
   4992 		sc->sc_pba = PBA_34K;
   4993 		break;
   4994 	case WM_T_ICH8:
   4995 		/* Workaround for a bit corruption issue in FIFO memory */
   4996 		sc->sc_pba = PBA_8K;
   4997 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4998 		break;
   4999 	case WM_T_ICH9:
   5000 	case WM_T_ICH10:
   5001 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5002 		    PBA_14K : PBA_10K;
   5003 		break;
   5004 	case WM_T_PCH:
   5005 	case WM_T_PCH2:	/* XXX 14K? */
   5006 	case WM_T_PCH_LPT:
   5007 	case WM_T_PCH_SPT:
   5008 	case WM_T_PCH_CNP:
   5009 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5010 		    PBA_12K : PBA_26K;
   5011 		break;
   5012 	default:
   5013 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5014 		    PBA_40K : PBA_48K;
   5015 		break;
   5016 	}
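	/*
	 * Worked example for the 82547 case above (assuming the PBA_*
	 * constants are in KB units, as PBA_BYTE_SHIFT suggests): with a
	 * standard MTU, sc_pba = PBA_30K, so the Tx FIFO starts at the
	 * 30KB mark and spans PBA_40K - PBA_30K = 10KB.
	 */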
   5017 	/*
   5018 	 * Only old or non-multiqueue devices have the PBA register
   5019 	 * XXX Need special handling for 82575.
   5020 	 */
   5021 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5022 	    || (sc->sc_type == WM_T_82575))
   5023 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5024 
   5025 	/* Prevent the PCI-E bus from sticking */
   5026 	if (sc->sc_flags & WM_F_PCIE) {
   5027 		int timeout = 800;
   5028 
   5029 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5030 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5031 
   5032 		while (timeout--) {
   5033 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5034 			    == 0)
   5035 				break;
   5036 			delay(100);
   5037 		}
   5038 		if (timeout == 0)
   5039 			device_printf(sc->sc_dev,
   5040 			    "failed to disable busmastering\n");
   5041 	}
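	/* Note: the poll above waits at most 800 * 100us = 80ms. */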
   5042 
   5043 	/* Set the completion timeout for interface */
   5044 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5045 	    || (sc->sc_type == WM_T_82580)
   5046 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5047 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5048 		wm_set_pcie_completion_timeout(sc);
   5049 
   5050 	/* Clear interrupt */
   5051 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5052 	if (wm_is_using_msix(sc)) {
   5053 		if (sc->sc_type != WM_T_82574) {
   5054 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5055 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5056 		} else
   5057 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5058 	}
   5059 
   5060 	/* Stop the transmit and receive processes. */
   5061 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5062 	sc->sc_rctl &= ~RCTL_EN;
   5063 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5064 	CSR_WRITE_FLUSH(sc);
   5065 
   5066 	/* XXX set_tbi_sbp_82543() */
   5067 
   5068 	delay(10*1000);
   5069 
   5070 	/* Must acquire the MDIO ownership before MAC reset */
   5071 	switch (sc->sc_type) {
   5072 	case WM_T_82573:
   5073 	case WM_T_82574:
   5074 	case WM_T_82583:
   5075 		error = wm_get_hw_semaphore_82573(sc);
   5076 		break;
   5077 	default:
   5078 		break;
   5079 	}
   5080 
   5081 	/*
   5082 	 * 82541 Errata 29? & 82547 Errata 28?
   5083 	 * See also the description about PHY_RST bit in CTRL register
   5084 	 * in 8254x_GBe_SDM.pdf.
   5085 	 */
   5086 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5087 		CSR_WRITE(sc, WMREG_CTRL,
   5088 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5089 		CSR_WRITE_FLUSH(sc);
   5090 		delay(5000);
   5091 	}
   5092 
   5093 	switch (sc->sc_type) {
   5094 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5095 	case WM_T_82541:
   5096 	case WM_T_82541_2:
   5097 	case WM_T_82547:
   5098 	case WM_T_82547_2:
   5099 		/*
   5100 		 * On some chipsets, a reset through a memory-mapped write
   5101 		 * cycle can cause the chip to reset before completing the
   5102 		 * write cycle. This causes major headaches that can be avoided
   5103 		 * by issuing the reset via indirect register writes through
   5104 		 * I/O space.
   5105 		 *
   5106 		 * So, if we successfully mapped the I/O BAR at attach time,
   5107 		 * use that. Otherwise, try our luck with a memory-mapped
   5108 		 * reset.
   5109 		 */
   5110 		if (sc->sc_flags & WM_F_IOH_VALID)
   5111 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5112 		else
   5113 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5114 		break;
   5115 	case WM_T_82545_3:
   5116 	case WM_T_82546_3:
   5117 		/* Use the shadow control register on these chips. */
   5118 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5119 		break;
   5120 	case WM_T_80003:
   5121 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5122 		sc->phy.acquire(sc);
   5123 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5124 		sc->phy.release(sc);
   5125 		break;
   5126 	case WM_T_ICH8:
   5127 	case WM_T_ICH9:
   5128 	case WM_T_ICH10:
   5129 	case WM_T_PCH:
   5130 	case WM_T_PCH2:
   5131 	case WM_T_PCH_LPT:
   5132 	case WM_T_PCH_SPT:
   5133 	case WM_T_PCH_CNP:
   5134 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5135 		if (wm_phy_resetisblocked(sc) == false) {
   5136 			/*
   5137 			 * Gate automatic PHY configuration by hardware on
   5138 			 * non-managed 82579
   5139 			 */
   5140 			if ((sc->sc_type == WM_T_PCH2)
   5141 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5142 				== 0))
   5143 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5144 
   5145 			reg |= CTRL_PHY_RESET;
   5146 			phy_reset = 1;
   5147 		} else
   5148 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5149 		sc->phy.acquire(sc);
   5150 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5151 		/* Don't insert a completion barrier during reset */
   5152 		delay(20*1000);
   5153 		mutex_exit(sc->sc_ich_phymtx);
   5154 		break;
   5155 	case WM_T_82580:
   5156 	case WM_T_I350:
   5157 	case WM_T_I354:
   5158 	case WM_T_I210:
   5159 	case WM_T_I211:
   5160 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5161 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5162 			CSR_WRITE_FLUSH(sc);
   5163 		delay(5000);
   5164 		break;
   5165 	case WM_T_82542_2_0:
   5166 	case WM_T_82542_2_1:
   5167 	case WM_T_82543:
   5168 	case WM_T_82540:
   5169 	case WM_T_82545:
   5170 	case WM_T_82546:
   5171 	case WM_T_82571:
   5172 	case WM_T_82572:
   5173 	case WM_T_82573:
   5174 	case WM_T_82574:
   5175 	case WM_T_82575:
   5176 	case WM_T_82576:
   5177 	case WM_T_82583:
   5178 	default:
   5179 		/* Everything else can safely use the documented method. */
   5180 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5181 		break;
   5182 	}
   5183 
   5184 	/* Must release the MDIO ownership after MAC reset */
   5185 	switch (sc->sc_type) {
   5186 	case WM_T_82573:
   5187 	case WM_T_82574:
   5188 	case WM_T_82583:
   5189 		if (error == 0)
   5190 			wm_put_hw_semaphore_82573(sc);
   5191 		break;
   5192 	default:
   5193 		break;
   5194 	}
   5195 
   5196 	/* Set Phy Config Counter to 50msec */
   5197 	if (sc->sc_type == WM_T_PCH2) {
   5198 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5199 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5200 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5201 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5202 	}
   5203 
   5204 	if (phy_reset != 0)
   5205 		wm_get_cfg_done(sc);
   5206 
   5207 	/* Reload EEPROM */
   5208 	switch (sc->sc_type) {
   5209 	case WM_T_82542_2_0:
   5210 	case WM_T_82542_2_1:
   5211 	case WM_T_82543:
   5212 	case WM_T_82544:
   5213 		delay(10);
   5214 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5215 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5216 		CSR_WRITE_FLUSH(sc);
   5217 		delay(2000);
   5218 		break;
   5219 	case WM_T_82540:
   5220 	case WM_T_82545:
   5221 	case WM_T_82545_3:
   5222 	case WM_T_82546:
   5223 	case WM_T_82546_3:
   5224 		delay(5*1000);
   5225 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5226 		break;
   5227 	case WM_T_82541:
   5228 	case WM_T_82541_2:
   5229 	case WM_T_82547:
   5230 	case WM_T_82547_2:
   5231 		delay(20000);
   5232 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5233 		break;
   5234 	case WM_T_82571:
   5235 	case WM_T_82572:
   5236 	case WM_T_82573:
   5237 	case WM_T_82574:
   5238 	case WM_T_82583:
   5239 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5240 			delay(10);
   5241 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5242 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5243 			CSR_WRITE_FLUSH(sc);
   5244 		}
   5245 		/* check EECD_EE_AUTORD */
   5246 		wm_get_auto_rd_done(sc);
   5247 		/*
   5248 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5249 		 * is set.
   5250 		 */
   5251 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5252 		    || (sc->sc_type == WM_T_82583))
   5253 			delay(25*1000);
   5254 		break;
   5255 	case WM_T_82575:
   5256 	case WM_T_82576:
   5257 	case WM_T_82580:
   5258 	case WM_T_I350:
   5259 	case WM_T_I354:
   5260 	case WM_T_I210:
   5261 	case WM_T_I211:
   5262 	case WM_T_80003:
   5263 		/* check EECD_EE_AUTORD */
   5264 		wm_get_auto_rd_done(sc);
   5265 		break;
   5266 	case WM_T_ICH8:
   5267 	case WM_T_ICH9:
   5268 	case WM_T_ICH10:
   5269 	case WM_T_PCH:
   5270 	case WM_T_PCH2:
   5271 	case WM_T_PCH_LPT:
   5272 	case WM_T_PCH_SPT:
   5273 	case WM_T_PCH_CNP:
   5274 		break;
   5275 	default:
   5276 		panic("%s: unknown type\n", __func__);
   5277 	}
   5278 
   5279 	/* Check whether EEPROM is present or not */
   5280 	switch (sc->sc_type) {
   5281 	case WM_T_82575:
   5282 	case WM_T_82576:
   5283 	case WM_T_82580:
   5284 	case WM_T_I350:
   5285 	case WM_T_I354:
   5286 	case WM_T_ICH8:
   5287 	case WM_T_ICH9:
   5288 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5289 			/* Not found */
   5290 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5291 			if (sc->sc_type == WM_T_82575)
   5292 				wm_reset_init_script_82575(sc);
   5293 		}
   5294 		break;
   5295 	default:
   5296 		break;
   5297 	}
   5298 
   5299 	if (phy_reset != 0)
   5300 		wm_phy_post_reset(sc);
   5301 
   5302 	if ((sc->sc_type == WM_T_82580)
   5303 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5304 		/* Clear global device reset status bit */
   5305 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5306 	}
   5307 
   5308 	/* Clear any pending interrupt events. */
   5309 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5310 	reg = CSR_READ(sc, WMREG_ICR);
   5311 	if (wm_is_using_msix(sc)) {
   5312 		if (sc->sc_type != WM_T_82574) {
   5313 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5314 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5315 		} else
   5316 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5317 	}
   5318 
   5319 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5320 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5321 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5322 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5323 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5324 		reg |= KABGTXD_BGSQLBIAS;
   5325 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5326 	}
   5327 
   5328 	/* Reload sc_ctrl */
   5329 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5330 
   5331 	wm_set_eee(sc);
   5332 
   5333 	/*
   5334 	 * For PCH, this write will make sure that any noise will be detected
   5335 	 * as a CRC error and be dropped rather than show up as a bad packet
   5336 	 * to the DMA engine
   5337 	 */
   5338 	if (sc->sc_type == WM_T_PCH)
   5339 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5340 
   5341 	if (sc->sc_type >= WM_T_82544)
   5342 		CSR_WRITE(sc, WMREG_WUC, 0);
   5343 
   5344 	if (sc->sc_type < WM_T_82575)
   5345 		wm_disable_aspm(sc); /* Workaround for some chips */
   5346 
   5347 	wm_reset_mdicnfg_82580(sc);
   5348 
   5349 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5350 		wm_pll_workaround_i210(sc);
   5351 
   5352 	if (sc->sc_type == WM_T_80003) {
   5353 		/* Default to TRUE to enable the MDIC W/A */
   5354 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5355 
   5356 		rv = wm_kmrn_readreg(sc,
   5357 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5358 		if (rv == 0) {
   5359 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5360 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5361 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5362 			else
   5363 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5364 		}
   5365 	}
   5366 }
   5367 
   5368 /*
   5369  * wm_add_rxbuf:
   5370  *
   5371  *	Add a receive buffer to the indicated descriptor.
   5372  */
   5373 static int
   5374 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5375 {
   5376 	struct wm_softc *sc = rxq->rxq_sc;
   5377 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5378 	struct mbuf *m;
   5379 	int error;
   5380 
   5381 	KASSERT(mutex_owned(rxq->rxq_lock));
   5382 
   5383 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5384 	if (m == NULL)
   5385 		return ENOBUFS;
   5386 
   5387 	MCLGET(m, M_DONTWAIT);
   5388 	if ((m->m_flags & M_EXT) == 0) {
   5389 		m_freem(m);
   5390 		return ENOBUFS;
   5391 	}
   5392 
   5393 	if (rxs->rxs_mbuf != NULL)
   5394 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5395 
   5396 	rxs->rxs_mbuf = m;
   5397 
   5398 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5399 	/*
   5400 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5401 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5402 	 */
   5403 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5404 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5405 	if (error) {
   5406 		/* XXX XXX XXX */
   5407 		aprint_error_dev(sc->sc_dev,
   5408 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5409 		panic("wm_add_rxbuf");
   5410 	}
   5411 
   5412 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5413 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5414 
   5415 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5416 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5417 			wm_init_rxdesc(rxq, idx);
   5418 	} else
   5419 		wm_init_rxdesc(rxq, idx);
   5420 
   5421 	return 0;
   5422 }
   5423 
   5424 /*
   5425  * wm_rxdrain:
   5426  *
   5427  *	Drain the receive queue.
   5428  */
   5429 static void
   5430 wm_rxdrain(struct wm_rxqueue *rxq)
   5431 {
   5432 	struct wm_softc *sc = rxq->rxq_sc;
   5433 	struct wm_rxsoft *rxs;
   5434 	int i;
   5435 
   5436 	KASSERT(mutex_owned(rxq->rxq_lock));
   5437 
   5438 	for (i = 0; i < WM_NRXDESC; i++) {
   5439 		rxs = &rxq->rxq_soft[i];
   5440 		if (rxs->rxs_mbuf != NULL) {
   5441 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5442 			m_freem(rxs->rxs_mbuf);
   5443 			rxs->rxs_mbuf = NULL;
   5444 		}
   5445 	}
   5446 }
   5447 
   5448 /*
   5449  * Setup registers for RSS.
   5450  *
   5451  * XXX not yet VMDq support
   5452  */
   5453 static void
   5454 wm_init_rss(struct wm_softc *sc)
   5455 {
   5456 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5457 	int i;
   5458 
   5459 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5460 
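	/*
	 * Fill the redirection table round-robin: entry i maps to queue
	 * i % sc_nqueues, so with e.g. four queues the entries cycle
	 * 0, 1, 2, 3, 0, ... and RSS hash values spread evenly over the
	 * queues.
	 */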
   5461 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5462 		unsigned int qid, reta_ent;
   5463 
   5464 		qid  = i % sc->sc_nqueues;
   5465 		switch (sc->sc_type) {
   5466 		case WM_T_82574:
   5467 			reta_ent = __SHIFTIN(qid,
   5468 			    RETA_ENT_QINDEX_MASK_82574);
   5469 			break;
   5470 		case WM_T_82575:
   5471 			reta_ent = __SHIFTIN(qid,
   5472 			    RETA_ENT_QINDEX1_MASK_82575);
   5473 			break;
   5474 		default:
   5475 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5476 			break;
   5477 		}
   5478 
   5479 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5480 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5481 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5482 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5483 	}
   5484 
   5485 	rss_getkey((uint8_t *)rss_key);
   5486 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5487 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5488 
   5489 	if (sc->sc_type == WM_T_82574)
   5490 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5491 	else
   5492 		mrqc = MRQC_ENABLE_RSS_MQ;
   5493 
   5494 	/*
   5495  * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5496 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5497 	 */
   5498 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5499 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5500 #if 0
   5501 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5502 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5503 #endif
   5504 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5505 
   5506 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5507 }
   5508 
   5509 /*
   5510  * Adjust the TX and RX queue numbers which the system actually uses.
   5511  *
   5512  * The numbers are affected by the parameters below.
   5513  *     - The number of hardware queues
   5514  *     - The number of MSI-X vectors (= "nvectors" argument)
   5515  *     - ncpu
   5516  */
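/*
 * A worked example of the logic below: an 82576 exposes 16 hardware
 * queue pairs; with nvectors = 5 and ncpu = 8, hw_nqueues = 16, so
 * sc_nqueues = nvectors - 1 = 4 (one vector is reserved for the link
 * interrupt), and min(4, ncpu) leaves it at 4.
 */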
   5517 static void
   5518 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5519 {
   5520 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5521 
   5522 	if (nvectors < 2) {
   5523 		sc->sc_nqueues = 1;
   5524 		return;
   5525 	}
   5526 
   5527 	switch (sc->sc_type) {
   5528 	case WM_T_82572:
   5529 		hw_ntxqueues = 2;
   5530 		hw_nrxqueues = 2;
   5531 		break;
   5532 	case WM_T_82574:
   5533 		hw_ntxqueues = 2;
   5534 		hw_nrxqueues = 2;
   5535 		break;
   5536 	case WM_T_82575:
   5537 		hw_ntxqueues = 4;
   5538 		hw_nrxqueues = 4;
   5539 		break;
   5540 	case WM_T_82576:
   5541 		hw_ntxqueues = 16;
   5542 		hw_nrxqueues = 16;
   5543 		break;
   5544 	case WM_T_82580:
   5545 	case WM_T_I350:
   5546 	case WM_T_I354:
   5547 		hw_ntxqueues = 8;
   5548 		hw_nrxqueues = 8;
   5549 		break;
   5550 	case WM_T_I210:
   5551 		hw_ntxqueues = 4;
   5552 		hw_nrxqueues = 4;
   5553 		break;
   5554 	case WM_T_I211:
   5555 		hw_ntxqueues = 2;
   5556 		hw_nrxqueues = 2;
   5557 		break;
   5558 		/*
   5559 		 * The following Ethernet controllers do not support MSI-X,
   5560 		 * so this driver does not use multiqueue on them.
   5561 		 *     - WM_T_80003
   5562 		 *     - WM_T_ICH8
   5563 		 *     - WM_T_ICH9
   5564 		 *     - WM_T_ICH10
   5565 		 *     - WM_T_PCH
   5566 		 *     - WM_T_PCH2
   5567 		 *     - WM_T_PCH_LPT
   5568 		 */
   5569 	default:
   5570 		hw_ntxqueues = 1;
   5571 		hw_nrxqueues = 1;
   5572 		break;
   5573 	}
   5574 
   5575 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5576 
   5577 	/*
   5578 	 * Since more queues than MSI-X vectors cannot improve scaling, we
   5579 	 * limit the number of queues actually used.
   5580 	 */
   5581 	if (nvectors < hw_nqueues + 1)
   5582 		sc->sc_nqueues = nvectors - 1;
   5583 	else
   5584 		sc->sc_nqueues = hw_nqueues;
   5585 
   5586 	/*
   5587 	 * Since more queues than CPUs cannot improve scaling, we limit
   5588 	 * the number of queues actually used.
   5589 	 */
   5590 	if (ncpu < sc->sc_nqueues)
   5591 		sc->sc_nqueues = ncpu;
   5592 }
   5593 
   5594 static inline bool
   5595 wm_is_using_msix(struct wm_softc *sc)
   5596 {
   5597 
   5598 	return (sc->sc_nintrs > 1);
   5599 }
   5600 
   5601 static inline bool
   5602 wm_is_using_multiqueue(struct wm_softc *sc)
   5603 {
   5604 
   5605 	return (sc->sc_nqueues > 1);
   5606 }
   5607 
   5608 static int
   5609 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5610 {
   5611 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5612 
   5613 	wmq->wmq_id = qidx;
   5614 	wmq->wmq_intr_idx = intr_idx;
   5615 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5616 	    wm_handle_queue, wmq);
   5617 	if (wmq->wmq_si != NULL)
   5618 		return 0;
   5619 
   5620 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5621 	    wmq->wmq_id);
   5622 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5623 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5624 	return ENOMEM;
   5625 }
   5626 
   5627 /*
   5628  * Both single interrupt MSI and INTx can use this function.
   5629  */
   5630 static int
   5631 wm_setup_legacy(struct wm_softc *sc)
   5632 {
   5633 	pci_chipset_tag_t pc = sc->sc_pc;
   5634 	const char *intrstr = NULL;
   5635 	char intrbuf[PCI_INTRSTR_LEN];
   5636 	int error;
   5637 
   5638 	error = wm_alloc_txrx_queues(sc);
   5639 	if (error) {
   5640 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5641 		    error);
   5642 		return ENOMEM;
   5643 	}
   5644 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5645 	    sizeof(intrbuf));
   5646 #ifdef WM_MPSAFE
   5647 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5648 #endif
   5649 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5650 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5651 	if (sc->sc_ihs[0] == NULL) {
   5652 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5653 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5654 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5655 		return ENOMEM;
   5656 	}
   5657 
   5658 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5659 	sc->sc_nintrs = 1;
   5660 
   5661 	return wm_softint_establish_queue(sc, 0, 0);
   5662 }
   5663 
   5664 static int
   5665 wm_setup_msix(struct wm_softc *sc)
   5666 {
   5667 	void *vih;
   5668 	kcpuset_t *affinity;
   5669 	int qidx, error, intr_idx, txrx_established;
   5670 	pci_chipset_tag_t pc = sc->sc_pc;
   5671 	const char *intrstr = NULL;
   5672 	char intrbuf[PCI_INTRSTR_LEN];
   5673 	char intr_xname[INTRDEVNAMEBUF];
   5674 
   5675 	if (sc->sc_nqueues < ncpu) {
   5676 		/*
   5677 		 * To avoid conflicts with other devices' interrupts, the
   5678 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5679 		 */
   5680 		sc->sc_affinity_offset = 1;
   5681 	} else {
   5682 		/*
   5683 		 * In this case, this device uses all CPUs, so we align the
   5684 		 * affinity cpu_index with the MSI-X vector number for readability.
   5685 		 */
   5686 		sc->sc_affinity_offset = 0;
   5687 	}
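	/*
	 * For example, with four queues on an eight-CPU system the offset
	 * is 1, so queue N's Tx/Rx vector is bound to CPU (1 + N) % 8
	 * below, leaving CPU#0 for other interrupts; the link vector keeps
	 * the default affinity.
	 */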
   5688 
   5689 	error = wm_alloc_txrx_queues(sc);
   5690 	if (error) {
   5691 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5692 		    error);
   5693 		return ENOMEM;
   5694 	}
   5695 
   5696 	kcpuset_create(&affinity, false);
   5697 	intr_idx = 0;
   5698 
   5699 	/*
   5700 	 * TX and RX
   5701 	 */
   5702 	txrx_established = 0;
   5703 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5704 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5705 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5706 
   5707 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5708 		    sizeof(intrbuf));
   5709 #ifdef WM_MPSAFE
   5710 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5711 		    PCI_INTR_MPSAFE, true);
   5712 #endif
   5713 		memset(intr_xname, 0, sizeof(intr_xname));
   5714 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5715 		    device_xname(sc->sc_dev), qidx);
   5716 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5717 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5718 		if (vih == NULL) {
   5719 			aprint_error_dev(sc->sc_dev,
   5720 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5721 			    intrstr ? " at " : "",
   5722 			    intrstr ? intrstr : "");
   5723 
   5724 			goto fail;
   5725 		}
   5726 		kcpuset_zero(affinity);
   5727 		/* Round-robin affinity */
   5728 		kcpuset_set(affinity, affinity_to);
   5729 		error = interrupt_distribute(vih, affinity, NULL);
   5730 		if (error == 0) {
   5731 			aprint_normal_dev(sc->sc_dev,
   5732 			    "for TX and RX interrupting at %s affinity to %u\n",
   5733 			    intrstr, affinity_to);
   5734 		} else {
   5735 			aprint_normal_dev(sc->sc_dev,
   5736 			    "for TX and RX interrupting at %s\n", intrstr);
   5737 		}
   5738 		sc->sc_ihs[intr_idx] = vih;
   5739 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5740 			goto fail;
   5741 		txrx_established++;
   5742 		intr_idx++;
   5743 	}
   5744 
   5745 	/* LINK */
   5746 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5747 	    sizeof(intrbuf));
   5748 #ifdef WM_MPSAFE
   5749 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5750 #endif
   5751 	memset(intr_xname, 0, sizeof(intr_xname));
   5752 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5753 	    device_xname(sc->sc_dev));
   5754 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5755 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5756 	if (vih == NULL) {
   5757 		aprint_error_dev(sc->sc_dev,
   5758 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5759 		    intrstr ? " at " : "",
   5760 		    intrstr ? intrstr : "");
   5761 
   5762 		goto fail;
   5763 	}
   5764 	/* Keep default affinity to LINK interrupt */
   5765 	aprint_normal_dev(sc->sc_dev,
   5766 	    "for LINK interrupting at %s\n", intrstr);
   5767 	sc->sc_ihs[intr_idx] = vih;
   5768 	sc->sc_link_intr_idx = intr_idx;
   5769 
   5770 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5771 	kcpuset_destroy(affinity);
   5772 	return 0;
   5773 
   5774  fail:
   5775 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5776 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5777 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5778 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5779 	}
   5780 
   5781 	kcpuset_destroy(affinity);
   5782 	return ENOMEM;
   5783 }
   5784 
   5785 static void
   5786 wm_unset_stopping_flags(struct wm_softc *sc)
   5787 {
   5788 	int i;
   5789 
   5790 	KASSERT(WM_CORE_LOCKED(sc));
   5791 
   5792 	/* Must unset stopping flags in ascending order. */
   5793 	for (i = 0; i < sc->sc_nqueues; i++) {
   5794 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5795 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5796 
   5797 		mutex_enter(txq->txq_lock);
   5798 		txq->txq_stopping = false;
   5799 		mutex_exit(txq->txq_lock);
   5800 
   5801 		mutex_enter(rxq->rxq_lock);
   5802 		rxq->rxq_stopping = false;
   5803 		mutex_exit(rxq->rxq_lock);
   5804 	}
   5805 
   5806 	sc->sc_core_stopping = false;
   5807 }
   5808 
   5809 static void
   5810 wm_set_stopping_flags(struct wm_softc *sc)
   5811 {
   5812 	int i;
   5813 
   5814 	KASSERT(WM_CORE_LOCKED(sc));
   5815 
   5816 	sc->sc_core_stopping = true;
   5817 
   5818 	/* Must set stopping flags in ascending order. */
   5819 	for (i = 0; i < sc->sc_nqueues; i++) {
   5820 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5821 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5822 
   5823 		mutex_enter(rxq->rxq_lock);
   5824 		rxq->rxq_stopping = true;
   5825 		mutex_exit(rxq->rxq_lock);
   5826 
   5827 		mutex_enter(txq->txq_lock);
   5828 		txq->txq_stopping = true;
   5829 		mutex_exit(txq->txq_lock);
   5830 	}
   5831 }
   5832 
   5833 /*
   5834  * Write interrupt interval value to ITR or EITR
   5835  */
   5836 static void
   5837 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5838 {
   5839 
   5840 	if (!wmq->wmq_set_itr)
   5841 		return;
   5842 
   5843 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5844 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5845 
   5846 		/*
   5847 		 * The 82575 doesn't have the CNT_INGR field,
   5848 		 * so overwrite the counter field in software.
   5849 		 */
   5850 		if (sc->sc_type == WM_T_82575)
   5851 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5852 		else
   5853 			eitr |= EITR_CNT_INGR;
   5854 
   5855 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5856 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5857 		/*
   5858 		 * The 82574 has both ITR and EITR. Set EITR when we use
   5859 		 * the multiqueue function with MSI-X.
   5860 		 */
   5861 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5862 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5863 	} else {
   5864 		KASSERT(wmq->wmq_id == 0);
   5865 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5866 	}
   5867 
   5868 	wmq->wmq_set_itr = false;
   5869 }
   5870 
   5871 /*
   5872  * TODO
   5873  * The dynamic calculation of itr below is almost the same as Linux igb,
   5874  * but it does not fit wm(4) well, so AIM is disabled until we find an
   5875  * appropriate itr calculation.
   5876  */
   5877 /*
   5878  * Calculate the interrupt interval value that will be written to the
   5879  * register by wm_itrs_writereg(); this function itself does not write it.
   5880  */
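/*
 * A worked example of the disabled heuristic below: with an average
 * packet size of 800 bytes, avg_size becomes 824 after the 24-byte
 * overhead is added, which falls in the mid-size window, so
 * new_itr = 824 / 3 = 274, scaled by 4 to 1096 on most NEWQUEUE
 * controllers.
 */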
   5881 static void
   5882 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5883 {
   5884 #ifdef NOTYET
   5885 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5886 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5887 	uint32_t avg_size = 0;
   5888 	uint32_t new_itr;
   5889 
   5890 	if (rxq->rxq_packets)
   5891 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5892 	if (txq->txq_packets)
   5893 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5894 
   5895 	if (avg_size == 0) {
   5896 		new_itr = 450; /* restore default value */
   5897 		goto out;
   5898 	}
   5899 
   5900 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5901 	avg_size += 24;
   5902 
   5903 	/* Don't starve jumbo frames */
   5904 	avg_size = uimin(avg_size, 3000);
   5905 
   5906 	/* Give a little boost to mid-size frames */
   5907 	if ((avg_size > 300) && (avg_size < 1200))
   5908 		new_itr = avg_size / 3;
   5909 	else
   5910 		new_itr = avg_size / 2;
   5911 
   5912 out:
   5913 	/*
   5914 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5915 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5916 	 */
   5917 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5918 		new_itr *= 4;
   5919 
   5920 	if (new_itr != wmq->wmq_itr) {
   5921 		wmq->wmq_itr = new_itr;
   5922 		wmq->wmq_set_itr = true;
   5923 	} else
   5924 		wmq->wmq_set_itr = false;
   5925 
   5926 	rxq->rxq_packets = 0;
   5927 	rxq->rxq_bytes = 0;
   5928 	txq->txq_packets = 0;
   5929 	txq->txq_bytes = 0;
   5930 #endif
   5931 }
   5932 
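/*
 * The sysctl tree built below hangs off hw.<dvname>; for example, if
 * the device attaches as wm0 (an illustrative name), one would expect
 * hw.wm0.txrx_workqueue and per-queue read-only nodes such as
 * hw.wm0.q0.txq_free.
 */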
   5933 static void
   5934 wm_init_sysctls(struct wm_softc *sc)
   5935 {
   5936 	struct sysctllog **log;
   5937 	const struct sysctlnode *rnode, *qnode, *cnode;
   5938 	int i, rv;
   5939 	const char *dvname;
   5940 
   5941 	log = &sc->sc_sysctllog;
   5942 	dvname = device_xname(sc->sc_dev);
   5943 
   5944 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5945 	    0, CTLTYPE_NODE, dvname,
   5946 	    SYSCTL_DESCR("wm information and settings"),
   5947 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5948 	if (rv != 0)
   5949 		goto err;
   5950 
   5951 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5952 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5953 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5954 	if (rv != 0)
   5955 		goto teardown;
   5956 
   5957 	for (i = 0; i < sc->sc_nqueues; i++) {
   5958 		struct wm_queue *wmq = &sc->sc_queue[i];
   5959 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5960 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5961 
   5962 		snprintf(sc->sc_queue[i].sysctlname,
   5963 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5964 
   5965 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5966 		    0, CTLTYPE_NODE,
   5967 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5968 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5969 			break;
   5970 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5971 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5972 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5973 		    NULL, 0, &txq->txq_free,
   5974 		    0, CTL_CREATE, CTL_EOL) != 0)
   5975 			break;
   5976 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5977 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5978 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5979 		    NULL, 0, &txq->txq_next,
   5980 		    0, CTL_CREATE, CTL_EOL) != 0)
   5981 			break;
   5982 
   5983 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5984 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5985 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5986 		    NULL, 0, &rxq->rxq_ptr,
   5987 		    0, CTL_CREATE, CTL_EOL) != 0)
   5988 			break;
   5989 	}
   5990 
   5991 #ifdef WM_DEBUG
   5992 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5993 	    CTLTYPE_INT, "debug_flags",
   5994 	    SYSCTL_DESCR(
   5995 		    "Debug flags:\n"	\
   5996 		    "\t0x01 LINK\n"	\
   5997 		    "\t0x02 TX\n"	\
   5998 		    "\t0x04 RX\n"	\
   5999 		    "\t0x08 GMII\n"	\
   6000 		    "\t0x10 MANAGE\n"	\
   6001 		    "\t0x20 NVM\n"	\
   6002 		    "\t0x40 INIT\n"	\
   6003 		    "\t0x80 LOCK"),
   6004 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6005 	if (rv != 0)
   6006 		goto teardown;
   6007 #endif
   6008 
   6009 	return;
   6010 
   6011 teardown:
   6012 	sysctl_teardown(log);
   6013 err:
   6014 	sc->sc_sysctllog = NULL;
   6015 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6016 	    __func__, rv);
   6017 }
   6018 
   6019 /*
   6020  * wm_init:		[ifnet interface function]
   6021  *
   6022  *	Initialize the interface.
   6023  */
   6024 static int
   6025 wm_init(struct ifnet *ifp)
   6026 {
   6027 	struct wm_softc *sc = ifp->if_softc;
   6028 	int ret;
   6029 
   6030 	WM_CORE_LOCK(sc);
   6031 	ret = wm_init_locked(ifp);
   6032 	WM_CORE_UNLOCK(sc);
   6033 
   6034 	return ret;
   6035 }
   6036 
   6037 static int
   6038 wm_init_locked(struct ifnet *ifp)
   6039 {
   6040 	struct wm_softc *sc = ifp->if_softc;
   6041 	struct ethercom *ec = &sc->sc_ethercom;
   6042 	int i, j, trynum, error = 0;
   6043 	uint32_t reg, sfp_mask = 0;
   6044 
   6045 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6046 		device_xname(sc->sc_dev), __func__));
   6047 	KASSERT(WM_CORE_LOCKED(sc));
   6048 
   6049 	/*
   6050 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   6051 	 * There is a small but measurable benefit to avoiding the adjustment
   6052 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6053 	 * on such platforms.  One possibility is that the DMA itself is
   6054 	 * slightly more efficient if the front of the entire packet (instead
   6055 	 * of the front of the headers) is aligned.
   6056 	 *
   6057 	 * Note we must always set align_tweak to 0 if we are using
   6058 	 * jumbo frames.
   6059 	 */
   6060 #ifdef __NO_STRICT_ALIGNMENT
   6061 	sc->sc_align_tweak = 0;
   6062 #else
   6063 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6064 		sc->sc_align_tweak = 0;
   6065 	else
   6066 		sc->sc_align_tweak = 2;
   6067 #endif /* __NO_STRICT_ALIGNMENT */
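	/*
	 * Worked example, assuming 2KB mbuf clusters: for the default MTU
	 * of 1500, 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 fits in
	 * MCLBYTES - 2 = 2046, so align_tweak is 2, which shifts the
	 * 14-byte Ethernet header so that the IP header lands 4-byte
	 * aligned.
	 */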
   6068 
   6069 	/* Cancel any pending I/O. */
   6070 	wm_stop_locked(ifp, false, false);
   6071 
   6072 	/* Update statistics before reset */
   6073 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6074 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6075 
   6076 	/* PCH_SPT hardware workaround */
   6077 	if (sc->sc_type == WM_T_PCH_SPT)
   6078 		wm_flush_desc_rings(sc);
   6079 
   6080 	/* Reset the chip to a known state. */
   6081 	wm_reset(sc);
   6082 
   6083 	/*
   6084 	 * AMT-based hardware can now take control from firmware.
   6085 	 * Do this after reset.
   6086 	 */
   6087 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6088 		wm_get_hw_control(sc);
   6089 
   6090 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6091 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6092 		wm_legacy_irq_quirk_spt(sc);
   6093 
   6094 	/* Init hardware bits */
   6095 	wm_initialize_hardware_bits(sc);
   6096 
   6097 	/* Reset the PHY. */
   6098 	if (sc->sc_flags & WM_F_HAS_MII)
   6099 		wm_gmii_reset(sc);
   6100 
   6101 	if (sc->sc_type >= WM_T_ICH8) {
   6102 		reg = CSR_READ(sc, WMREG_GCR);
   6103 		/*
   6104 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6105 		 * default after reset.
   6106 		 */
   6107 		if (sc->sc_type == WM_T_ICH8)
   6108 			reg |= GCR_NO_SNOOP_ALL;
   6109 		else
   6110 			reg &= ~GCR_NO_SNOOP_ALL;
   6111 		CSR_WRITE(sc, WMREG_GCR, reg);
   6112 	}
   6113 
   6114 	if ((sc->sc_type >= WM_T_ICH8)
   6115 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6116 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6117 
   6118 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6119 		reg |= CTRL_EXT_RO_DIS;
   6120 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6121 	}
   6122 
   6123 	/* Calculate (E)ITR value */
   6124 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6125 		/*
   6126 		 * For NEWQUEUE's EITR (except for 82575).
   6127 		 * 82575's EITR should be set to the same throttling value as
   6128 		 * other old controllers' ITR because the interrupt/sec
   6129 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   6130 		 *
   6131 		 * 82574's EITR should be set to the same throttling value as ITR.
   6132 		 *
   6133 		 * For N interrupts/sec, set this value to:
   6134 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   6135 		 */
   6136 		sc->sc_itr_init = 450;
   6137 	} else if (sc->sc_type >= WM_T_82543) {
   6138 		/*
   6139 		 * Set up the interrupt throttling register (units of 256ns)
   6140 		 * Note that a footnote in Intel's documentation says this
   6141 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6142 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6143 		 * that that is also true for the 1024ns units of the other
   6144 		 * interrupt-related timer registers -- so, really, we ought
   6145 		 * to divide this value by 4 when the link speed is low.
   6146 		 *
   6147 		 * XXX implement this division at link speed change!
   6148 		 */
   6149 
   6150 		/*
   6151 		 * For N interrupts/sec, set this value to:
   6152 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6153 		 * absolute and packet timer values to this value
   6154 		 * divided by 4 to get "simple timer" behavior.
   6155 		 */
   6156 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6157 	}
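	/*
	 * Worked example: the legacy ITR of 1500 (256ns units) yields
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, while the
	 * NEWQUEUE EITR of 450 yields 1,000,000 / 450 ~= 2222
	 * interrupts/sec.
	 */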
   6158 
   6159 	error = wm_init_txrx_queues(sc);
   6160 	if (error)
   6161 		goto out;
   6162 
   6163 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6164 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6165 	    (sc->sc_type >= WM_T_82575))
   6166 		wm_serdes_power_up_link_82575(sc);
   6167 
   6168 	/* Clear out the VLAN table -- we don't use it (yet). */
   6169 	CSR_WRITE(sc, WMREG_VET, 0);
   6170 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6171 		trynum = 10; /* Due to hw errata */
   6172 	else
   6173 		trynum = 1;
   6174 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6175 		for (j = 0; j < trynum; j++)
   6176 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6177 
   6178 	/*
   6179 	 * Set up flow-control parameters.
   6180 	 *
   6181 	 * XXX Values could probably stand some tuning.
   6182 	 */
   6183 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6184 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6185 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6186 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6187 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6188 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6189 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6190 	}
   6191 
   6192 	sc->sc_fcrtl = FCRTL_DFLT;
   6193 	if (sc->sc_type < WM_T_82543) {
   6194 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6195 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6196 	} else {
   6197 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6198 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6199 	}
   6200 
   6201 	if (sc->sc_type == WM_T_80003)
   6202 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6203 	else
   6204 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6205 
   6206 	/* Writes the control register. */
   6207 	wm_set_vlan(sc);
   6208 
   6209 	if (sc->sc_flags & WM_F_HAS_MII) {
   6210 		uint16_t kmreg;
   6211 
   6212 		switch (sc->sc_type) {
   6213 		case WM_T_80003:
   6214 		case WM_T_ICH8:
   6215 		case WM_T_ICH9:
   6216 		case WM_T_ICH10:
   6217 		case WM_T_PCH:
   6218 		case WM_T_PCH2:
   6219 		case WM_T_PCH_LPT:
   6220 		case WM_T_PCH_SPT:
   6221 		case WM_T_PCH_CNP:
   6222 			/*
			 * Set the MAC to wait the maximum time between
			 * each iteration, and increase the maximum number
			 * of iterations when polling the PHY; this fixes
			 * erroneous timeouts at 10Mbps.
   6227 			 */
   6228 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6229 			    0xFFFF);
   6230 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6231 			    &kmreg);
   6232 			kmreg |= 0x3F;
   6233 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6234 			    kmreg);
   6235 			break;
   6236 		default:
   6237 			break;
   6238 		}
   6239 
   6240 		if (sc->sc_type == WM_T_80003) {
   6241 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6242 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6243 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6244 
			/* Bypass the RX and TX FIFOs */
   6246 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6247 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6248 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6249 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6250 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6251 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6252 		}
   6253 	}
   6254 #if 0
   6255 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6256 #endif
   6257 
   6258 	/* Set up checksum offload parameters. */
   6259 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6260 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6261 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6262 		reg |= RXCSUM_IPOFL;
   6263 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6264 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6265 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6266 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6267 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6268 
	/* Set up the MSI-X related registers. */
   6270 	if (wm_is_using_msix(sc)) {
   6271 		uint32_t ivar, qintr_idx;
   6272 		struct wm_queue *wmq;
   6273 		unsigned int qid;
   6274 
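		/*
		 * The interrupt-vector mapping differs by family: the
		 * 82575 uses per-vector MSIXBM bitmap registers, the
		 * 82574 packs all queue vectors into the single IVAR
		 * register, and the 82576 and later use per-queue IVAR
		 * registers, as the branches below show.
		 */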
   6275 		if (sc->sc_type == WM_T_82575) {
   6276 			/* Interrupt control */
   6277 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6278 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6279 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6280 
   6281 			/* TX and RX */
   6282 			for (i = 0; i < sc->sc_nqueues; i++) {
   6283 				wmq = &sc->sc_queue[i];
   6284 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6285 				    EITR_TX_QUEUE(wmq->wmq_id)
   6286 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6287 			}
   6288 			/* Link status */
   6289 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6290 			    EITR_OTHER);
   6291 		} else if (sc->sc_type == WM_T_82574) {
   6292 			/* Interrupt control */
   6293 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6294 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6295 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6296 
			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
			 */
   6303 			reg = CSR_READ(sc, WMREG_RFCTL);
   6304 			reg |= WMREG_RFCTL_ACKDIS;
   6305 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6306 
   6307 			ivar = 0;
   6308 			/* TX and RX */
   6309 			for (i = 0; i < sc->sc_nqueues; i++) {
   6310 				wmq = &sc->sc_queue[i];
   6311 				qid = wmq->wmq_id;
   6312 				qintr_idx = wmq->wmq_intr_idx;
   6313 
   6314 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6315 				    IVAR_TX_MASK_Q_82574(qid));
   6316 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6317 				    IVAR_RX_MASK_Q_82574(qid));
   6318 			}
   6319 			/* Link status */
   6320 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6321 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6322 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6323 		} else {
   6324 			/* Interrupt control */
   6325 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6326 			    | GPIE_EIAME | GPIE_PBA);
   6327 
   6328 			switch (sc->sc_type) {
   6329 			case WM_T_82580:
   6330 			case WM_T_I350:
   6331 			case WM_T_I354:
   6332 			case WM_T_I210:
   6333 			case WM_T_I211:
   6334 				/* TX and RX */
   6335 				for (i = 0; i < sc->sc_nqueues; i++) {
   6336 					wmq = &sc->sc_queue[i];
   6337 					qid = wmq->wmq_id;
   6338 					qintr_idx = wmq->wmq_intr_idx;
   6339 
   6340 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6341 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6342 					ivar |= __SHIFTIN((qintr_idx
   6343 						| IVAR_VALID),
   6344 					    IVAR_TX_MASK_Q(qid));
   6345 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6346 					ivar |= __SHIFTIN((qintr_idx
   6347 						| IVAR_VALID),
   6348 					    IVAR_RX_MASK_Q(qid));
   6349 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6350 				}
   6351 				break;
   6352 			case WM_T_82576:
   6353 				/* TX and RX */
   6354 				for (i = 0; i < sc->sc_nqueues; i++) {
   6355 					wmq = &sc->sc_queue[i];
   6356 					qid = wmq->wmq_id;
   6357 					qintr_idx = wmq->wmq_intr_idx;
   6358 
   6359 					ivar = CSR_READ(sc,
   6360 					    WMREG_IVAR_Q_82576(qid));
   6361 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6362 					ivar |= __SHIFTIN((qintr_idx
   6363 						| IVAR_VALID),
   6364 					    IVAR_TX_MASK_Q_82576(qid));
   6365 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6366 					ivar |= __SHIFTIN((qintr_idx
   6367 						| IVAR_VALID),
   6368 					    IVAR_RX_MASK_Q_82576(qid));
   6369 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6370 					    ivar);
   6371 				}
   6372 				break;
   6373 			default:
   6374 				break;
   6375 			}
   6376 
   6377 			/* Link status */
   6378 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6379 			    IVAR_MISC_OTHER);
   6380 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6381 		}
   6382 
   6383 		if (wm_is_using_multiqueue(sc)) {
   6384 			wm_init_rss(sc);
   6385 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload is
			 * mutually exclusive with Multiqueue. However,
			 * this is not the same as TCP/IP checksum offload,
			 * which still works.
			 */
   6392 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6393 			reg |= RXCSUM_PCSD;
   6394 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6395 		}
   6396 	}
   6397 
   6398 	/* Set up the interrupt registers. */
   6399 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6400 
   6401 	/* Enable SFP module insertion interrupt if it's required */
   6402 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6403 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6404 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6405 		sfp_mask = ICR_GPI(0);
   6406 	}
   6407 
   6408 	if (wm_is_using_msix(sc)) {
   6409 		uint32_t mask;
   6410 		struct wm_queue *wmq;
   6411 
   6412 		switch (sc->sc_type) {
   6413 		case WM_T_82574:
   6414 			mask = 0;
   6415 			for (i = 0; i < sc->sc_nqueues; i++) {
   6416 				wmq = &sc->sc_queue[i];
   6417 				mask |= ICR_TXQ(wmq->wmq_id);
   6418 				mask |= ICR_RXQ(wmq->wmq_id);
   6419 			}
   6420 			mask |= ICR_OTHER;
   6421 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6422 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6423 			break;
   6424 		default:
   6425 			if (sc->sc_type == WM_T_82575) {
   6426 				mask = 0;
   6427 				for (i = 0; i < sc->sc_nqueues; i++) {
   6428 					wmq = &sc->sc_queue[i];
   6429 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6430 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6431 				}
   6432 				mask |= EITR_OTHER;
   6433 			} else {
   6434 				mask = 0;
   6435 				for (i = 0; i < sc->sc_nqueues; i++) {
   6436 					wmq = &sc->sc_queue[i];
   6437 					mask |= 1 << wmq->wmq_intr_idx;
   6438 				}
   6439 				mask |= 1 << sc->sc_link_intr_idx;
   6440 			}
   6441 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6442 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6443 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6444 
   6445 			/* For other interrupts */
   6446 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6447 			break;
   6448 		}
   6449 	} else {
   6450 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6451 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6452 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6453 	}
   6454 
   6455 	/* Set up the inter-packet gap. */
   6456 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6457 
   6458 	if (sc->sc_type >= WM_T_82543) {
   6459 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6460 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6461 			wm_itrs_writereg(sc, wmq);
   6462 		}
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
		 * does.
		 */
   6469 	}
   6470 
   6471 	/* Set the VLAN ethernetype. */
   6472 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6473 
	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
   6479 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6480 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6481 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6482 	if (sc->sc_type >= WM_T_82571)
   6483 		sc->sc_tctl |= TCTL_MULR;
   6484 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6485 
   6486 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
   6488 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6489 	}
   6490 
   6491 	if (sc->sc_type == WM_T_80003) {
   6492 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6493 		reg &= ~TCTL_EXT_GCEX_MASK;
   6494 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6495 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6496 	}
   6497 
   6498 	/* Set the media. */
   6499 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6500 		goto out;
   6501 
   6502 	/* Configure for OS presence */
   6503 	wm_init_manageability(sc);
   6504 
   6505 	/*
   6506 	 * Set up the receive control register; we actually program the
   6507 	 * register when we set the receive filter. Use multicast address
   6508 	 * offset type 0.
   6509 	 *
   6510 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6511 	 * don't enable that feature.
   6512 	 */
   6513 	sc->sc_mchash_type = 0;
   6514 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6515 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6516 
	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6518 	if (sc->sc_type == WM_T_82574)
   6519 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6520 
   6521 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6522 		sc->sc_rctl |= RCTL_SECRC;
   6523 
   6524 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6525 	    && (ifp->if_mtu > ETHERMTU)) {
   6526 		sc->sc_rctl |= RCTL_LPE;
   6527 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6528 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6529 	}
   6530 
   6531 	if (MCLBYTES == 2048)
   6532 		sc->sc_rctl |= RCTL_2k;
   6533 	else {
   6534 		if (sc->sc_type >= WM_T_82543) {
   6535 			switch (MCLBYTES) {
   6536 			case 4096:
   6537 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6538 				break;
   6539 			case 8192:
   6540 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6541 				break;
   6542 			case 16384:
   6543 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6544 				break;
   6545 			default:
   6546 				panic("wm_init: MCLBYTES %d unsupported",
   6547 				    MCLBYTES);
   6548 				break;
   6549 			}
   6550 		} else
   6551 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6552 	}
   6553 
   6554 	/* Enable ECC */
   6555 	switch (sc->sc_type) {
   6556 	case WM_T_82571:
   6557 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6558 		reg |= PBA_ECC_CORR_EN;
   6559 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6560 		break;
   6561 	case WM_T_PCH_LPT:
   6562 	case WM_T_PCH_SPT:
   6563 	case WM_T_PCH_CNP:
   6564 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6565 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6566 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6567 
   6568 		sc->sc_ctrl |= CTRL_MEHE;
   6569 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6570 		break;
   6571 	default:
   6572 		break;
   6573 	}
   6574 
   6575 	/*
   6576 	 * Set the receive filter.
   6577 	 *
   6578 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6579 	 * the setting of RCTL.EN in wm_set_filter()
   6580 	 */
   6581 	wm_set_filter(sc);
   6582 
	/* On 82575 and later, set RDT only if RX is enabled. */
   6584 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6585 		int qidx;
   6586 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6587 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6588 			for (i = 0; i < WM_NRXDESC; i++) {
   6589 				mutex_enter(rxq->rxq_lock);
   6590 				wm_init_rxdesc(rxq, i);
   6591 				mutex_exit(rxq->rxq_lock);
   6593 			}
   6594 		}
   6595 	}
   6596 
   6597 	wm_unset_stopping_flags(sc);
   6598 
   6599 	/* Start the one second link check clock. */
   6600 	callout_schedule(&sc->sc_tick_ch, hz);
   6601 
   6602 	/* ...all done! */
   6603 	ifp->if_flags |= IFF_RUNNING;
   6604 
   6605  out:
   6606 	/* Save last flags for the callback */
   6607 	sc->sc_if_flags = ifp->if_flags;
   6608 	sc->sc_ec_capenable = ec->ec_capenable;
   6609 	if (error)
   6610 		log(LOG_ERR, "%s: interface not running\n",
   6611 		    device_xname(sc->sc_dev));
   6612 	return error;
   6613 }
   6614 
   6615 /*
   6616  * wm_stop:		[ifnet interface function]
   6617  *
   6618  *	Stop transmission on the interface.
   6619  */
   6620 static void
   6621 wm_stop(struct ifnet *ifp, int disable)
   6622 {
   6623 	struct wm_softc *sc = ifp->if_softc;
   6624 
   6625 	ASSERT_SLEEPABLE();
   6626 
   6627 	WM_CORE_LOCK(sc);
   6628 	wm_stop_locked(ifp, disable ? true : false, true);
   6629 	WM_CORE_UNLOCK(sc);
   6630 
	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
	 */
   6638 	for (int i = 0; i < sc->sc_nqueues; i++)
   6639 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6640 }
   6641 
   6642 static void
   6643 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6644 {
   6645 	struct wm_softc *sc = ifp->if_softc;
   6646 	struct wm_txsoft *txs;
   6647 	int i, qidx;
   6648 
   6649 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6650 		device_xname(sc->sc_dev), __func__));
   6651 	KASSERT(WM_CORE_LOCKED(sc));
   6652 
   6653 	wm_set_stopping_flags(sc);
   6654 
   6655 	if (sc->sc_flags & WM_F_HAS_MII) {
   6656 		/* Down the MII. */
   6657 		mii_down(&sc->sc_mii);
   6658 	} else {
   6659 #if 0
   6660 		/* Should we clear PHY's status properly? */
   6661 		wm_reset(sc);
   6662 #endif
   6663 	}
   6664 
   6665 	/* Stop the transmit and receive processes. */
   6666 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6667 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6668 	sc->sc_rctl &= ~RCTL_EN;
   6669 
   6670 	/*
   6671 	 * Clear the interrupt mask to ensure the device cannot assert its
   6672 	 * interrupt line.
   6673 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6674 	 * service any currently pending or shared interrupt.
   6675 	 */
   6676 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6677 	sc->sc_icr = 0;
   6678 	if (wm_is_using_msix(sc)) {
   6679 		if (sc->sc_type != WM_T_82574) {
   6680 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6681 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6682 		} else
   6683 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6684 	}
   6685 
   6686 	/*
   6687 	 * Stop callouts after interrupts are disabled; if we have
   6688 	 * to wait for them, we will be releasing the CORE_LOCK
   6689 	 * briefly, which will unblock interrupts on the current CPU.
   6690 	 */
   6691 
   6692 	/* Stop the one second clock. */
   6693 	if (wait)
   6694 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6695 	else
   6696 		callout_stop(&sc->sc_tick_ch);
   6697 
   6698 	/* Stop the 82547 Tx FIFO stall check timer. */
   6699 	if (sc->sc_type == WM_T_82547) {
   6700 		if (wait)
   6701 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6702 		else
   6703 			callout_stop(&sc->sc_txfifo_ch);
   6704 	}
   6705 
   6706 	/* Release any queued transmit buffers. */
   6707 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6708 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6709 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6710 		struct mbuf *m;
   6711 
   6712 		mutex_enter(txq->txq_lock);
   6713 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6714 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6715 			txs = &txq->txq_soft[i];
   6716 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   6718 				m_freem(txs->txs_mbuf);
   6719 				txs->txs_mbuf = NULL;
   6720 			}
   6721 		}
   6722 		/* Drain txq_interq */
   6723 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6724 			m_freem(m);
   6725 		mutex_exit(txq->txq_lock);
   6726 	}
   6727 
   6728 	/* Mark the interface as down and cancel the watchdog timer. */
   6729 	ifp->if_flags &= ~IFF_RUNNING;
   6730 
   6731 	if (disable) {
   6732 		for (i = 0; i < sc->sc_nqueues; i++) {
   6733 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6734 			mutex_enter(rxq->rxq_lock);
   6735 			wm_rxdrain(rxq);
   6736 			mutex_exit(rxq->rxq_lock);
   6737 		}
   6738 	}
   6739 
   6740 #if 0 /* notyet */
   6741 	if (sc->sc_type >= WM_T_82544)
   6742 		CSR_WRITE(sc, WMREG_WUC, 0);
   6743 #endif
   6744 }
   6745 
   6746 static void
   6747 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6748 {
   6749 	struct mbuf *m;
   6750 	int i;
   6751 
   6752 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6753 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6754 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6755 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6756 		    m->m_data, m->m_len, m->m_flags);
   6757 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6758 	    i, i == 1 ? "" : "s");
   6759 }
   6760 
   6761 /*
   6762  * wm_82547_txfifo_stall:
   6763  *
   6764  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6765  *	reset the FIFO pointers, and restart packet transmission.
   6766  */
   6767 static void
   6768 wm_82547_txfifo_stall(void *arg)
   6769 {
   6770 	struct wm_softc *sc = arg;
   6771 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6772 
   6773 	mutex_enter(txq->txq_lock);
   6774 
   6775 	if (txq->txq_stopping)
   6776 		goto out;
   6777 
   6778 	if (txq->txq_fifo_stall) {
   6779 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6780 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6781 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6782 			/*
   6783 			 * Packets have drained.  Stop transmitter, reset
   6784 			 * FIFO pointers, restart transmitter, and kick
   6785 			 * the packet queue.
   6786 			 */
   6787 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6788 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6789 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6790 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6791 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6792 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6793 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6794 			CSR_WRITE_FLUSH(sc);
   6795 
   6796 			txq->txq_fifo_head = 0;
   6797 			txq->txq_fifo_stall = 0;
   6798 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6799 		} else {
   6800 			/*
   6801 			 * Still waiting for packets to drain; try again in
   6802 			 * another tick.
   6803 			 */
   6804 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6805 		}
   6806 	}
   6807 
   6808 out:
   6809 	mutex_exit(txq->txq_lock);
   6810 }
   6811 
   6812 /*
   6813  * wm_82547_txfifo_bugchk:
   6814  *
 *	Check for a bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6818  *
   6819  *	We do this by checking the amount of space before the end
   6820  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6821  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6822  *	the internal FIFO pointers to the beginning, and restart
   6823  *	transmission on the interface.
   6824  */
   6825 #define	WM_FIFO_HDR		0x10
   6826 #define	WM_82547_PAD_LEN	0x3e0
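/*
 * Worked example: each packet occupies roundup(len + WM_FIFO_HDR,
 * WM_FIFO_HDR) bytes of FIFO space, so a 1514-byte frame consumes
 * roundup(1514 + 16, 16) = 1536 bytes.
 */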
   6827 static int
   6828 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6829 {
   6830 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6831 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6832 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6833 
   6834 	/* Just return if already stalled. */
   6835 	if (txq->txq_fifo_stall)
   6836 		return 1;
   6837 
   6838 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6839 		/* Stall only occurs in half-duplex mode. */
   6840 		goto send_packet;
   6841 	}
   6842 
   6843 	if (len >= WM_82547_PAD_LEN + space) {
   6844 		txq->txq_fifo_stall = 1;
   6845 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6846 		return 1;
   6847 	}
   6848 
   6849  send_packet:
   6850 	txq->txq_fifo_head += len;
   6851 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6852 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6853 
   6854 	return 0;
   6855 }
   6856 
   6857 static int
   6858 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6859 {
   6860 	int error;
   6861 
   6862 	/*
   6863 	 * Allocate the control data structures, and create and load the
   6864 	 * DMA map for it.
   6865 	 *
   6866 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6867 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6868 	 * both sets within the same 4G segment.
   6869 	 */
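	/*
	 * The 4G constraint is enforced by passing 0x100000000 as the
	 * boundary argument to bus_dmamem_alloc() below, so the
	 * allocation never crosses a 4G boundary.
	 */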
   6870 	if (sc->sc_type < WM_T_82544)
   6871 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6872 	else
   6873 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6874 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6875 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6876 	else
   6877 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6878 
   6879 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6880 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6881 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6882 		aprint_error_dev(sc->sc_dev,
   6883 		    "unable to allocate TX control data, error = %d\n",
   6884 		    error);
   6885 		goto fail_0;
   6886 	}
   6887 
   6888 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6889 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6890 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6891 		aprint_error_dev(sc->sc_dev,
   6892 		    "unable to map TX control data, error = %d\n", error);
   6893 		goto fail_1;
   6894 	}
   6895 
   6896 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6897 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6898 		aprint_error_dev(sc->sc_dev,
   6899 		    "unable to create TX control data DMA map, error = %d\n",
   6900 		    error);
   6901 		goto fail_2;
   6902 	}
   6903 
   6904 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6905 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6906 		aprint_error_dev(sc->sc_dev,
   6907 		    "unable to load TX control data DMA map, error = %d\n",
   6908 		    error);
   6909 		goto fail_3;
   6910 	}
   6911 
   6912 	return 0;
   6913 
   6914  fail_3:
   6915 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6916  fail_2:
   6917 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6918 	    WM_TXDESCS_SIZE(txq));
   6919  fail_1:
   6920 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6921  fail_0:
   6922 	return error;
   6923 }
   6924 
   6925 static void
   6926 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6927 {
   6928 
   6929 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6930 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6931 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6932 	    WM_TXDESCS_SIZE(txq));
   6933 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6934 }
   6935 
   6936 static int
   6937 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6938 {
   6939 	int error;
   6940 	size_t rxq_descs_size;
   6941 
   6942 	/*
   6943 	 * Allocate the control data structures, and create and load the
   6944 	 * DMA map for it.
   6945 	 *
   6946 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6947 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6948 	 * both sets within the same 4G segment.
   6949 	 */
   6950 	rxq->rxq_ndesc = WM_NRXDESC;
   6951 	if (sc->sc_type == WM_T_82574)
   6952 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6953 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6954 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6955 	else
   6956 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6957 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6958 
   6959 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6960 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6961 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6962 		aprint_error_dev(sc->sc_dev,
   6963 		    "unable to allocate RX control data, error = %d\n",
   6964 		    error);
   6965 		goto fail_0;
   6966 	}
   6967 
   6968 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6969 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6970 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6971 		aprint_error_dev(sc->sc_dev,
   6972 		    "unable to map RX control data, error = %d\n", error);
   6973 		goto fail_1;
   6974 	}
   6975 
   6976 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6977 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6978 		aprint_error_dev(sc->sc_dev,
   6979 		    "unable to create RX control data DMA map, error = %d\n",
   6980 		    error);
   6981 		goto fail_2;
   6982 	}
   6983 
   6984 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6985 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6986 		aprint_error_dev(sc->sc_dev,
   6987 		    "unable to load RX control data DMA map, error = %d\n",
   6988 		    error);
   6989 		goto fail_3;
   6990 	}
   6991 
   6992 	return 0;
   6993 
   6994  fail_3:
   6995 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6996  fail_2:
   6997 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6998 	    rxq_descs_size);
   6999  fail_1:
   7000 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7001  fail_0:
   7002 	return error;
   7003 }
   7004 
   7005 static void
   7006 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7007 {
   7008 
   7009 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7010 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7011 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7012 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7013 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7014 }
   7015 
   7017 static int
   7018 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7019 {
   7020 	int i, error;
   7021 
   7022 	/* Create the transmit buffer DMA maps. */
   7023 	WM_TXQUEUELEN(txq) =
   7024 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7025 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7026 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7027 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7028 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7029 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7030 			aprint_error_dev(sc->sc_dev,
   7031 			    "unable to create Tx DMA map %d, error = %d\n",
   7032 			    i, error);
   7033 			goto fail;
   7034 		}
   7035 	}
   7036 
   7037 	return 0;
   7038 
   7039  fail:
   7040 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7041 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7042 			bus_dmamap_destroy(sc->sc_dmat,
   7043 			    txq->txq_soft[i].txs_dmamap);
   7044 	}
   7045 	return error;
   7046 }
   7047 
   7048 static void
   7049 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7050 {
   7051 	int i;
   7052 
   7053 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7054 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7055 			bus_dmamap_destroy(sc->sc_dmat,
   7056 			    txq->txq_soft[i].txs_dmamap);
   7057 	}
   7058 }
   7059 
   7060 static int
   7061 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7062 {
   7063 	int i, error;
   7064 
   7065 	/* Create the receive buffer DMA maps. */
   7066 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7067 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7068 			    MCLBYTES, 0, 0,
   7069 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7070 			aprint_error_dev(sc->sc_dev,
   7071 			    "unable to create Rx DMA map %d error = %d\n",
   7072 			    i, error);
   7073 			goto fail;
   7074 		}
   7075 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7076 	}
   7077 
   7078 	return 0;
   7079 
   7080  fail:
   7081 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7082 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7083 			bus_dmamap_destroy(sc->sc_dmat,
   7084 			    rxq->rxq_soft[i].rxs_dmamap);
   7085 	}
   7086 	return error;
   7087 }
   7088 
   7089 static void
   7090 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7091 {
   7092 	int i;
   7093 
   7094 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7095 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7096 			bus_dmamap_destroy(sc->sc_dmat,
   7097 			    rxq->rxq_soft[i].rxs_dmamap);
   7098 	}
   7099 }
   7100 
   7101 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7104  */
   7105 static int
   7106 wm_alloc_txrx_queues(struct wm_softc *sc)
   7107 {
   7108 	int i, error, tx_done, rx_done;
   7109 
   7110 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7111 	    KM_SLEEP);
   7112 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate wm_queue\n");
   7114 		error = ENOMEM;
   7115 		goto fail_0;
   7116 	}
   7117 
   7118 	/* For transmission */
   7119 	error = 0;
   7120 	tx_done = 0;
   7121 	for (i = 0; i < sc->sc_nqueues; i++) {
   7122 #ifdef WM_EVENT_COUNTERS
   7123 		int j;
   7124 		const char *xname;
   7125 #endif
   7126 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7127 		txq->txq_sc = sc;
   7128 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7129 
   7130 		error = wm_alloc_tx_descs(sc, txq);
   7131 		if (error)
   7132 			break;
   7133 		error = wm_alloc_tx_buffer(sc, txq);
   7134 		if (error) {
   7135 			wm_free_tx_descs(sc, txq);
   7136 			break;
   7137 		}
   7138 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7139 		if (txq->txq_interq == NULL) {
   7140 			wm_free_tx_descs(sc, txq);
   7141 			wm_free_tx_buffer(sc, txq);
   7142 			error = ENOMEM;
   7143 			break;
   7144 		}
   7145 
   7146 #ifdef WM_EVENT_COUNTERS
   7147 		xname = device_xname(sc->sc_dev);
   7148 
   7149 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7150 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7151 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7152 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7153 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7154 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7155 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7156 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7157 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7158 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7159 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7160 
   7161 		for (j = 0; j < WM_NTXSEGS; j++) {
   7162 			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   7166 		}
   7167 
   7168 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7169 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7170 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7171 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7172 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7173 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7174 #endif /* WM_EVENT_COUNTERS */
   7175 
   7176 		tx_done++;
   7177 	}
   7178 	if (error)
   7179 		goto fail_1;
   7180 
   7181 	/* For receive */
   7182 	error = 0;
   7183 	rx_done = 0;
   7184 	for (i = 0; i < sc->sc_nqueues; i++) {
   7185 #ifdef WM_EVENT_COUNTERS
   7186 		const char *xname;
   7187 #endif
   7188 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7189 		rxq->rxq_sc = sc;
   7190 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7191 
   7192 		error = wm_alloc_rx_descs(sc, rxq);
   7193 		if (error)
   7194 			break;
   7195 
   7196 		error = wm_alloc_rx_buffer(sc, rxq);
   7197 		if (error) {
   7198 			wm_free_rx_descs(sc, rxq);
   7199 			break;
   7200 		}
   7201 
   7202 #ifdef WM_EVENT_COUNTERS
   7203 		xname = device_xname(sc->sc_dev);
   7204 
   7205 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7206 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7207 
   7208 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7209 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7210 #endif /* WM_EVENT_COUNTERS */
   7211 
   7212 		rx_done++;
   7213 	}
   7214 	if (error)
   7215 		goto fail_2;
   7216 
   7217 	return 0;
   7218 
   7219  fail_2:
   7220 	for (i = 0; i < rx_done; i++) {
   7221 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7222 		wm_free_rx_buffer(sc, rxq);
   7223 		wm_free_rx_descs(sc, rxq);
   7224 		if (rxq->rxq_lock)
   7225 			mutex_obj_free(rxq->rxq_lock);
   7226 	}
   7227  fail_1:
   7228 	for (i = 0; i < tx_done; i++) {
   7229 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7230 		pcq_destroy(txq->txq_interq);
   7231 		wm_free_tx_buffer(sc, txq);
   7232 		wm_free_tx_descs(sc, txq);
   7233 		if (txq->txq_lock)
   7234 			mutex_obj_free(txq->txq_lock);
   7235 	}
   7236 
   7237 	kmem_free(sc->sc_queue,
   7238 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7239  fail_0:
   7240 	return error;
   7241 }
   7242 
   7243 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7246  */
   7247 static void
   7248 wm_free_txrx_queues(struct wm_softc *sc)
   7249 {
   7250 	int i;
   7251 
   7252 	for (i = 0; i < sc->sc_nqueues; i++) {
   7253 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7254 
   7255 #ifdef WM_EVENT_COUNTERS
   7256 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7257 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7258 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7259 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7260 #endif /* WM_EVENT_COUNTERS */
   7261 
   7262 		wm_free_rx_buffer(sc, rxq);
   7263 		wm_free_rx_descs(sc, rxq);
   7264 		if (rxq->rxq_lock)
   7265 			mutex_obj_free(rxq->rxq_lock);
   7266 	}
   7267 
   7268 	for (i = 0; i < sc->sc_nqueues; i++) {
   7269 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7270 		struct mbuf *m;
   7271 #ifdef WM_EVENT_COUNTERS
   7272 		int j;
   7273 
   7274 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7275 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7276 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7277 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7278 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7279 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7280 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7281 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7282 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7283 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7284 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7285 
   7286 		for (j = 0; j < WM_NTXSEGS; j++)
   7287 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7288 
   7289 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7290 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7291 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7292 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7293 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7294 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7295 #endif /* WM_EVENT_COUNTERS */
   7296 
   7297 		/* Drain txq_interq */
   7298 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7299 			m_freem(m);
   7300 		pcq_destroy(txq->txq_interq);
   7301 
   7302 		wm_free_tx_buffer(sc, txq);
   7303 		wm_free_tx_descs(sc, txq);
   7304 		if (txq->txq_lock)
   7305 			mutex_obj_free(txq->txq_lock);
   7306 	}
   7307 
   7308 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7309 }
   7310 
   7311 static void
   7312 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7313 {
   7314 
   7315 	KASSERT(mutex_owned(txq->txq_lock));
   7316 
   7317 	/* Initialize the transmit descriptor ring. */
   7318 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7319 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7320 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7321 	txq->txq_free = WM_NTXDESC(txq);
   7322 	txq->txq_next = 0;
   7323 }
   7324 
   7325 static void
   7326 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7327     struct wm_txqueue *txq)
   7328 {
   7329 
   7330 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7331 		device_xname(sc->sc_dev), __func__));
   7332 	KASSERT(mutex_owned(txq->txq_lock));
   7333 
   7334 	if (sc->sc_type < WM_T_82543) {
   7335 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7336 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7337 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7338 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7339 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7340 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7341 	} else {
   7342 		int qid = wmq->wmq_id;
   7343 
   7344 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7345 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7346 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7347 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7348 
   7349 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7350 			/*
   7351 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   7353 			 */
   7354 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7355 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7356 			    | TXDCTL_WTHRESH(0));
   7357 		else {
   7358 			/* XXX should update with AIM? */
   7359 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7360 			if (sc->sc_type >= WM_T_82540) {
   7361 				/* Should be the same */
   7362 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7363 			}
   7364 
   7365 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7366 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7367 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7368 		}
   7369 	}
   7370 }
   7371 
   7372 static void
   7373 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7374 {
   7375 	int i;
   7376 
   7377 	KASSERT(mutex_owned(txq->txq_lock));
   7378 
   7379 	/* Initialize the transmit job descriptors. */
   7380 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7381 		txq->txq_soft[i].txs_mbuf = NULL;
   7382 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7383 	txq->txq_snext = 0;
   7384 	txq->txq_sdirty = 0;
   7385 }
   7386 
   7387 static void
   7388 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7389     struct wm_txqueue *txq)
   7390 {
   7391 
   7392 	KASSERT(mutex_owned(txq->txq_lock));
   7393 
   7394 	/*
   7395 	 * Set up some register offsets that are different between
   7396 	 * the i82542 and the i82543 and later chips.
   7397 	 */
   7398 	if (sc->sc_type < WM_T_82543)
   7399 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7400 	else
   7401 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7402 
   7403 	wm_init_tx_descs(sc, txq);
   7404 	wm_init_tx_regs(sc, wmq, txq);
   7405 	wm_init_tx_buffer(sc, txq);
   7406 
   7407 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7408 	txq->txq_sending = false;
   7409 }
   7410 
   7411 static void
   7412 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7413     struct wm_rxqueue *rxq)
   7414 {
   7415 
   7416 	KASSERT(mutex_owned(rxq->rxq_lock));
   7417 
   7418 	/*
   7419 	 * Initialize the receive descriptor and receive job
   7420 	 * descriptor rings.
   7421 	 */
   7422 	if (sc->sc_type < WM_T_82543) {
   7423 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7424 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7425 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7426 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7427 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7428 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7429 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7430 
   7431 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7432 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7433 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7434 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7435 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7436 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7437 	} else {
   7438 		int qid = wmq->wmq_id;
   7439 
   7440 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7441 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7442 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7443 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7444 
   7445 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7446 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
   7448 
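			/*
			 * Note (assumption): SRRCTL.BSIZEPKT is in 1KB
			 * units, so the usual MCLBYTES of 2048 programs a
			 * 2KB receive buffer below.
			 */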
			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7451 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7452 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7453 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7454 			    | RXDCTL_WTHRESH(1));
   7455 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7456 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7457 		} else {
   7458 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7459 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7460 			/* XXX should update with AIM? */
   7461 			CSR_WRITE(sc, WMREG_RDTR,
   7462 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
   7464 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7465 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7466 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7467 		}
   7468 	}
   7469 }
   7470 
   7471 static int
   7472 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7473 {
   7474 	struct wm_rxsoft *rxs;
   7475 	int error, i;
   7476 
   7477 	KASSERT(mutex_owned(rxq->rxq_lock));
   7478 
   7479 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7480 		rxs = &rxq->rxq_soft[i];
   7481 		if (rxs->rxs_mbuf == NULL) {
   7482 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7483 				log(LOG_ERR, "%s: unable to allocate or map "
   7484 				    "rx buffer %d, error = %d\n",
   7485 				    device_xname(sc->sc_dev), i, error);
   7486 				/*
   7487 				 * XXX Should attempt to run with fewer receive
   7488 				 * XXX buffers instead of just failing.
   7489 				 */
   7490 				wm_rxdrain(rxq);
   7491 				return ENOMEM;
   7492 			}
   7493 		} else {
   7494 			/*
   7495 			 * For 82575 and 82576, the RX descriptors must be
   7496 			 * initialized after the setting of RCTL.EN in
   7497 			 * wm_set_filter()
   7498 			 */
   7499 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7500 				wm_init_rxdesc(rxq, i);
   7501 		}
   7502 	}
   7503 	rxq->rxq_ptr = 0;
   7504 	rxq->rxq_discard = 0;
   7505 	WM_RXCHAIN_RESET(rxq);
   7506 
   7507 	return 0;
   7508 }
   7509 
   7510 static int
   7511 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7512     struct wm_rxqueue *rxq)
   7513 {
   7514 
   7515 	KASSERT(mutex_owned(rxq->rxq_lock));
   7516 
   7517 	/*
   7518 	 * Set up some register offsets that are different between
   7519 	 * the i82542 and the i82543 and later chips.
   7520 	 */
   7521 	if (sc->sc_type < WM_T_82543)
   7522 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7523 	else
   7524 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7525 
   7526 	wm_init_rx_regs(sc, wmq, rxq);
   7527 	return wm_init_rx_buffer(sc, rxq);
   7528 }
   7529 
   7530 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7533  */
   7534 static int
   7535 wm_init_txrx_queues(struct wm_softc *sc)
   7536 {
   7537 	int i, error = 0;
   7538 
   7539 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7540 		device_xname(sc->sc_dev), __func__));
   7541 
   7542 	for (i = 0; i < sc->sc_nqueues; i++) {
   7543 		struct wm_queue *wmq = &sc->sc_queue[i];
   7544 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7545 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7546 
		/*
		 * TODO
		 * Currently, we use a constant value instead of AIM
		 * (adaptive interrupt moderation). Furthermore, the
		 * interrupt interval for multiqueue, which uses polling
		 * mode, is lower than the default value.
		 * More tuning and AIM support are required.
		 */
   7554 		if (wm_is_using_multiqueue(sc))
   7555 			wmq->wmq_itr = 50;
   7556 		else
   7557 			wmq->wmq_itr = sc->sc_itr_init;
   7558 		wmq->wmq_set_itr = true;
   7559 
   7560 		mutex_enter(txq->txq_lock);
   7561 		wm_init_tx_queue(sc, wmq, txq);
   7562 		mutex_exit(txq->txq_lock);
   7563 
   7564 		mutex_enter(rxq->rxq_lock);
   7565 		error = wm_init_rx_queue(sc, wmq, rxq);
   7566 		mutex_exit(rxq->rxq_lock);
   7567 		if (error)
   7568 			break;
   7569 	}
   7570 
   7571 	return error;
   7572 }
   7573 
   7574 /*
   7575  * wm_tx_offload:
   7576  *
   7577  *	Set up TCP/IP checksumming parameters for the
   7578  *	specified packet.
   7579  */
   7580 static void
   7581 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7582     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7583 {
   7584 	struct mbuf *m0 = txs->txs_mbuf;
   7585 	struct livengood_tcpip_ctxdesc *t;
   7586 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7587 	uint32_t ipcse;
   7588 	struct ether_header *eh;
   7589 	int offset, iphl;
   7590 	uint8_t fields;
   7591 
   7592 	/*
   7593 	 * XXX It would be nice if the mbuf pkthdr had offset
   7594 	 * fields for the protocol headers.
   7595 	 */
   7596 
   7597 	eh = mtod(m0, struct ether_header *);
   7598 	switch (htons(eh->ether_type)) {
   7599 	case ETHERTYPE_IP:
   7600 	case ETHERTYPE_IPV6:
   7601 		offset = ETHER_HDR_LEN;
   7602 		break;
   7603 
   7604 	case ETHERTYPE_VLAN:
   7605 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7606 		break;
   7607 
   7608 	default:
   7609 		/* Don't support this protocol or encapsulation. */
   7610 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7611 		txq->txq_last_hw_ipcs = 0;
   7612 		txq->txq_last_hw_tucs = 0;
   7613 		*fieldsp = 0;
   7614 		*cmdp = 0;
   7615 		return;
   7616 	}
   7617 
   7618 	if ((m0->m_pkthdr.csum_flags &
   7619 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7620 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7621 	} else
   7622 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7623 
   7624 	ipcse = offset + iphl - 1;
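	/*
	 * Example: for a plain IPv4 packet with a 20-byte header,
	 * offset = 14 (ETHER_HDR_LEN) and ipcse = 33, the offset of the
	 * last byte of the IP header.
	 */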
   7625 
   7626 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7627 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7628 	seg = 0;
   7629 	fields = 0;
   7630 
   7631 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7632 		int hlen = offset + iphl;
   7633 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7634 
   7635 		if (__predict_false(m0->m_len <
   7636 				    (hlen + sizeof(struct tcphdr)))) {
   7637 			/*
   7638 			 * TCP/IP headers are not in the first mbuf; we need
   7639 			 * to do this the slow and painful way. Let's just
   7640 			 * hope this doesn't happen very often.
   7641 			 */
   7642 			struct tcphdr th;
   7643 
   7644 			WM_Q_EVCNT_INCR(txq, tsopain);
   7645 
   7646 			m_copydata(m0, hlen, sizeof(th), &th);
   7647 			if (v4) {
   7648 				struct ip ip;
   7649 
   7650 				m_copydata(m0, offset, sizeof(ip), &ip);
   7651 				ip.ip_len = 0;
   7652 				m_copyback(m0,
   7653 				    offset + offsetof(struct ip, ip_len),
   7654 				    sizeof(ip.ip_len), &ip.ip_len);
   7655 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7656 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7657 			} else {
   7658 				struct ip6_hdr ip6;
   7659 
   7660 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7661 				ip6.ip6_plen = 0;
   7662 				m_copyback(m0,
   7663 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7664 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7665 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7666 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7667 			}
   7668 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7669 			    sizeof(th.th_sum), &th.th_sum);
   7670 
   7671 			hlen += th.th_off << 2;
   7672 		} else {
   7673 			/*
   7674 			 * TCP/IP headers are in the first mbuf; we can do
   7675 			 * this the easy way.
   7676 			 */
   7677 			struct tcphdr *th;
   7678 
   7679 			if (v4) {
   7680 				struct ip *ip =
   7681 				    (void *)(mtod(m0, char *) + offset);
   7682 				th = (void *)(mtod(m0, char *) + hlen);
   7683 
   7684 				ip->ip_len = 0;
   7685 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7686 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7687 			} else {
   7688 				struct ip6_hdr *ip6 =
   7689 				    (void *)(mtod(m0, char *) + offset);
   7690 				th = (void *)(mtod(m0, char *) + hlen);
   7691 
   7692 				ip6->ip6_plen = 0;
   7693 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7694 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7695 			}
   7696 			hlen += th->th_off << 2;
   7697 		}
   7698 
   7699 		if (v4) {
   7700 			WM_Q_EVCNT_INCR(txq, tso);
   7701 			cmdlen |= WTX_TCPIP_CMD_IP;
   7702 		} else {
   7703 			WM_Q_EVCNT_INCR(txq, tso6);
   7704 			ipcse = 0;
   7705 		}
   7706 		cmd |= WTX_TCPIP_CMD_TSE;
   7707 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7708 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7709 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7710 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7711 	}
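	/*
	 * TSO example: for an IPv4 TCP segment with 14 + 20 + 20 = 54
	 * bytes of headers and an MSS of 1460, hlen = 54, cmdlen carries
	 * the payload length (m0->m_pkthdr.len - 54), and seg encodes
	 * HDRLEN(54) | MSS(1460).
	 */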
   7712 
   7713 	/*
   7714 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7715 	 * offload feature, if we load the context descriptor, we
   7716 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7717 	 */
   7718 
   7719 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7720 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7721 	    WTX_TCPIP_IPCSE(ipcse);
   7722 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7723 		WM_Q_EVCNT_INCR(txq, ipsum);
   7724 		fields |= WTX_IXSM;
   7725 	}
   7726 
   7727 	offset += iphl;
   7728 
   7729 	if (m0->m_pkthdr.csum_flags &
   7730 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7731 		WM_Q_EVCNT_INCR(txq, tusum);
   7732 		fields |= WTX_TXSM;
   7733 		tucs = WTX_TCPIP_TUCSS(offset) |
   7734 		    WTX_TCPIP_TUCSO(offset +
   7735 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7736 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7737 	} else if ((m0->m_pkthdr.csum_flags &
   7738 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7739 		WM_Q_EVCNT_INCR(txq, tusum6);
   7740 		fields |= WTX_TXSM;
   7741 		tucs = WTX_TCPIP_TUCSS(offset) |
   7742 		    WTX_TCPIP_TUCSO(offset +
   7743 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7744 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7745 	} else {
   7746 		/* Just initialize it to a valid TCP context. */
   7747 		tucs = WTX_TCPIP_TUCSS(offset) |
   7748 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7749 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7750 	}
   7751 
   7752 	*cmdp = cmd;
   7753 	*fieldsp = fields;
   7754 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574. For the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used,
	 * regardless of the queue it was used for.  We cannot reuse
	 * contexts on this hardware platform and must generate a new
	 * context every time.  82574L hardware spec, section 7.2.6,
	 * second note.
	 */
   7766 	if (sc->sc_nqueues < 2) {
		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time for the hardware.
		 * This also reduces performance a lot for small-sized
		 * frames, so avoid it if the driver can use a previously
		 * configured checksum offload context.
		 * For TSO, in theory we can reuse the same TSO context
		 * only if the frame is the same type (IP/TCP) and has the
		 * same MSS. However, checking whether a frame has the
		 * same IP/TCP structure is hard, so just ignore that and
		 * always establish a new TSO context.
		 */
   7779 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7780 		    == 0) {
   7781 			if (txq->txq_last_hw_cmd == cmd &&
   7782 			    txq->txq_last_hw_fields == fields &&
   7783 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7784 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7785 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7786 				return;
   7787 			}
   7788 		}
   7789 
   7790 		txq->txq_last_hw_cmd = cmd;
   7791 		txq->txq_last_hw_fields = fields;
   7792 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7793 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7794 	}
   7795 
   7796 	/* Fill in the context descriptor. */
   7797 	t = (struct livengood_tcpip_ctxdesc *)
   7798 	    &txq->txq_descs[txq->txq_next];
   7799 	t->tcpip_ipcs = htole32(ipcs);
   7800 	t->tcpip_tucs = htole32(tucs);
   7801 	t->tcpip_cmdlen = htole32(cmdlen);
   7802 	t->tcpip_seg = htole32(seg);
   7803 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7804 
   7805 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7806 	txs->txs_ndesc++;
   7807 }
   7808 
   7809 static inline int
   7810 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7811 {
   7812 	struct wm_softc *sc = ifp->if_softc;
   7813 	u_int cpuid = cpu_index(curcpu());
   7814 
    7815 	/*
    7816 	 * Currently, a simple distribution strategy.
    7817 	 * TODO:
    7818 	 * distribute by flowid (RSS hash value).
    7819 	 */
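         	/*
         	 * The arithmetic below maps the current CPU index to a queue
         	 * id: sc_affinity_offset shifts the mapping (presumably to
         	 * line queues up with interrupt affinity), and adding ncpu
         	 * keeps the intermediate value non-negative before the modulo.
         	 */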
   7820 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7821 }
   7822 
   7823 static inline bool
   7824 wm_linkdown_discard(struct wm_txqueue *txq)
   7825 {
   7826 
   7827 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7828 		return true;
   7829 
   7830 	return false;
   7831 }
   7832 
   7833 /*
   7834  * wm_start:		[ifnet interface function]
   7835  *
   7836  *	Start packet transmission on the interface.
   7837  */
   7838 static void
   7839 wm_start(struct ifnet *ifp)
   7840 {
   7841 	struct wm_softc *sc = ifp->if_softc;
   7842 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7843 
   7844 #ifdef WM_MPSAFE
   7845 	KASSERT(if_is_mpsafe(ifp));
   7846 #endif
   7847 	/*
   7848 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7849 	 */
   7850 
   7851 	mutex_enter(txq->txq_lock);
   7852 	if (!txq->txq_stopping)
   7853 		wm_start_locked(ifp);
   7854 	mutex_exit(txq->txq_lock);
   7855 }
   7856 
   7857 static void
   7858 wm_start_locked(struct ifnet *ifp)
   7859 {
   7860 	struct wm_softc *sc = ifp->if_softc;
   7861 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7862 
   7863 	wm_send_common_locked(ifp, txq, false);
   7864 }
   7865 
   7866 static int
   7867 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7868 {
   7869 	int qid;
   7870 	struct wm_softc *sc = ifp->if_softc;
   7871 	struct wm_txqueue *txq;
   7872 
   7873 	qid = wm_select_txqueue(ifp, m);
   7874 	txq = &sc->sc_queue[qid].wmq_txq;
   7875 
   7876 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7877 		m_freem(m);
   7878 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7879 		return ENOBUFS;
   7880 	}
   7881 
   7882 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7883 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7884 	if (m->m_flags & M_MCAST)
   7885 		if_statinc_ref(nsr, if_omcasts);
   7886 	IF_STAT_PUTREF(ifp);
   7887 
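         	/*
         	 * If the lock is contended, the packet enqueued above is
         	 * picked up later by the interrupt handler or by the deferred
         	 * if_start softint; see the comment in wm_nq_transmit().
         	 */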
   7888 	if (mutex_tryenter(txq->txq_lock)) {
   7889 		if (!txq->txq_stopping)
   7890 			wm_transmit_locked(ifp, txq);
   7891 		mutex_exit(txq->txq_lock);
   7892 	}
   7893 
   7894 	return 0;
   7895 }
   7896 
   7897 static void
   7898 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7899 {
   7900 
   7901 	wm_send_common_locked(ifp, txq, true);
   7902 }
   7903 
   7904 static void
   7905 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7906     bool is_transmit)
   7907 {
   7908 	struct wm_softc *sc = ifp->if_softc;
   7909 	struct mbuf *m0;
   7910 	struct wm_txsoft *txs;
   7911 	bus_dmamap_t dmamap;
   7912 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7913 	bus_addr_t curaddr;
   7914 	bus_size_t seglen, curlen;
   7915 	uint32_t cksumcmd;
   7916 	uint8_t cksumfields;
   7917 	bool remap = true;
   7918 
   7919 	KASSERT(mutex_owned(txq->txq_lock));
   7920 
   7921 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7922 		return;
   7923 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7924 		return;
   7925 
   7926 	if (__predict_false(wm_linkdown_discard(txq))) {
   7927 		do {
   7928 			if (is_transmit)
   7929 				m0 = pcq_get(txq->txq_interq);
   7930 			else
   7931 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    7932 			/*
    7933 			 * Increment the success counter even though the
    7934 			 * packet is discarded because the PHY link is down.
    7935 			 */
   7936 			if (m0 != NULL)
   7937 				if_statinc(ifp, if_opackets);
   7938 			m_freem(m0);
   7939 		} while (m0 != NULL);
   7940 		return;
   7941 	}
   7942 
   7943 	/* Remember the previous number of free descriptors. */
   7944 	ofree = txq->txq_free;
   7945 
   7946 	/*
   7947 	 * Loop through the send queue, setting up transmit descriptors
   7948 	 * until we drain the queue, or use up all available transmit
   7949 	 * descriptors.
   7950 	 */
   7951 	for (;;) {
   7952 		m0 = NULL;
   7953 
   7954 		/* Get a work queue entry. */
   7955 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7956 			wm_txeof(txq, UINT_MAX);
   7957 			if (txq->txq_sfree == 0) {
   7958 				DPRINTF(sc, WM_DEBUG_TX,
   7959 				    ("%s: TX: no free job descriptors\n",
   7960 					device_xname(sc->sc_dev)));
   7961 				WM_Q_EVCNT_INCR(txq, txsstall);
   7962 				break;
   7963 			}
   7964 		}
   7965 
   7966 		/* Grab a packet off the queue. */
   7967 		if (is_transmit)
   7968 			m0 = pcq_get(txq->txq_interq);
   7969 		else
   7970 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7971 		if (m0 == NULL)
   7972 			break;
   7973 
   7974 		DPRINTF(sc, WM_DEBUG_TX,
   7975 		    ("%s: TX: have packet to transmit: %p\n",
   7976 			device_xname(sc->sc_dev), m0));
   7977 
   7978 		txs = &txq->txq_soft[txq->txq_snext];
   7979 		dmamap = txs->txs_dmamap;
   7980 
   7981 		use_tso = (m0->m_pkthdr.csum_flags &
   7982 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7983 
   7984 		/*
   7985 		 * So says the Linux driver:
   7986 		 * The controller does a simple calculation to make sure
   7987 		 * there is enough room in the FIFO before initiating the
   7988 		 * DMA for each buffer. The calc is:
   7989 		 *	4 = ceil(buffer len / MSS)
   7990 		 * To make sure we don't overrun the FIFO, adjust the max
   7991 		 * buffer len if the MSS drops.
   7992 		 */
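         		/*
         		 * I.e., cap each DMA segment at 4 * MSS so that the
         		 * estimate above holds for every buffer we hand to
         		 * the controller.
         		 */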
   7993 		dmamap->dm_maxsegsz =
   7994 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7995 		    ? m0->m_pkthdr.segsz << 2
   7996 		    : WTX_MAX_LEN;
   7997 
   7998 		/*
   7999 		 * Load the DMA map.  If this fails, the packet either
   8000 		 * didn't fit in the allotted number of segments, or we
   8001 		 * were short on resources.  For the too-many-segments
   8002 		 * case, we simply report an error and drop the packet,
   8003 		 * since we can't sanely copy a jumbo packet to a single
   8004 		 * buffer.
   8005 		 */
   8006 retry:
   8007 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8008 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8009 		if (__predict_false(error)) {
   8010 			if (error == EFBIG) {
   8011 				if (remap == true) {
   8012 					struct mbuf *m;
   8013 
   8014 					remap = false;
   8015 					m = m_defrag(m0, M_NOWAIT);
   8016 					if (m != NULL) {
   8017 						WM_Q_EVCNT_INCR(txq, defrag);
   8018 						m0 = m;
   8019 						goto retry;
   8020 					}
   8021 				}
   8022 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8023 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8024 				    "DMA segments, dropping...\n",
   8025 				    device_xname(sc->sc_dev));
   8026 				wm_dump_mbuf_chain(sc, m0);
   8027 				m_freem(m0);
   8028 				continue;
   8029 			}
   8030 			/* Short on resources, just stop for now. */
   8031 			DPRINTF(sc, WM_DEBUG_TX,
   8032 			    ("%s: TX: dmamap load failed: %d\n",
   8033 				device_xname(sc->sc_dev), error));
   8034 			break;
   8035 		}
   8036 
   8037 		segs_needed = dmamap->dm_nsegs;
   8038 		if (use_tso) {
   8039 			/* For sentinel descriptor; see below. */
   8040 			segs_needed++;
   8041 		}
   8042 
   8043 		/*
   8044 		 * Ensure we have enough descriptors free to describe
   8045 		 * the packet. Note, we always reserve one descriptor
   8046 		 * at the end of the ring due to the semantics of the
   8047 		 * TDT register, plus one more in the event we need
   8048 		 * to load offload context.
   8049 		 */
   8050 		if (segs_needed > txq->txq_free - 2) {
   8051 			/*
   8052 			 * Not enough free descriptors to transmit this
   8053 			 * packet.  We haven't committed anything yet,
   8054 			 * so just unload the DMA map, put the packet
    8055 			 * back on the queue, and punt. Notify the upper
   8056 			 * layer that there are no more slots left.
   8057 			 */
   8058 			DPRINTF(sc, WM_DEBUG_TX,
   8059 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8060 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8061 				segs_needed, txq->txq_free - 1));
   8062 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8063 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8064 			WM_Q_EVCNT_INCR(txq, txdstall);
   8065 			break;
   8066 		}
   8067 
   8068 		/*
   8069 		 * Check for 82547 Tx FIFO bug. We need to do this
   8070 		 * once we know we can transmit the packet, since we
   8071 		 * do some internal FIFO space accounting here.
   8072 		 */
   8073 		if (sc->sc_type == WM_T_82547 &&
   8074 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8075 			DPRINTF(sc, WM_DEBUG_TX,
   8076 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8077 				device_xname(sc->sc_dev)));
   8078 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8079 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8080 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8081 			break;
   8082 		}
   8083 
   8084 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8085 
   8086 		DPRINTF(sc, WM_DEBUG_TX,
   8087 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8088 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8089 
   8090 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8091 
   8092 		/*
   8093 		 * Store a pointer to the packet so that we can free it
   8094 		 * later.
   8095 		 *
   8096 		 * Initially, we consider the number of descriptors the
    8097 		 * packet uses to be the number of DMA segments.  This may be
   8098 		 * incremented by 1 if we do checksum offload (a descriptor
   8099 		 * is used to set the checksum context).
   8100 		 */
   8101 		txs->txs_mbuf = m0;
   8102 		txs->txs_firstdesc = txq->txq_next;
   8103 		txs->txs_ndesc = segs_needed;
   8104 
   8105 		/* Set up offload parameters for this packet. */
   8106 		if (m0->m_pkthdr.csum_flags &
   8107 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8108 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8109 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8110 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8111 		} else {
   8112 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8113 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8114 			cksumcmd = 0;
   8115 			cksumfields = 0;
   8116 		}
   8117 
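         		/*
         		 * Every descriptor of this packet requests FCS
         		 * insertion (WTX_CMD_IFCS) and a delayed interrupt
         		 * (WTX_CMD_IDE).
         		 */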
   8118 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8119 
   8120 		/* Sync the DMA map. */
   8121 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8122 		    BUS_DMASYNC_PREWRITE);
   8123 
   8124 		/* Initialize the transmit descriptor. */
   8125 		for (nexttx = txq->txq_next, seg = 0;
   8126 		     seg < dmamap->dm_nsegs; seg++) {
   8127 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8128 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8129 			     seglen != 0;
   8130 			     curaddr += curlen, seglen -= curlen,
   8131 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8132 				curlen = seglen;
   8133 
   8134 				/*
   8135 				 * So says the Linux driver:
   8136 				 * Work around for premature descriptor
   8137 				 * write-backs in TSO mode.  Append a
   8138 				 * 4-byte sentinel descriptor.
   8139 				 */
   8140 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8141 				    curlen > 8)
   8142 					curlen -= 4;
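
         				/*
         				 * The 4 bytes trimmed here leave a
         				 * remainder in seglen, so the loop
         				 * runs once more and emits a 4-byte
         				 * final descriptor: the sentinel
         				 * counted in segs_needed above.
         				 */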
   8143 
   8144 				wm_set_dma_addr(
   8145 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8146 				txq->txq_descs[nexttx].wtx_cmdlen
   8147 				    = htole32(cksumcmd | curlen);
   8148 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8149 				    = 0;
   8150 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8151 				    = cksumfields;
   8152 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8153 				lasttx = nexttx;
   8154 
   8155 				DPRINTF(sc, WM_DEBUG_TX,
   8156 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8157 					"len %#04zx\n",
   8158 					device_xname(sc->sc_dev), nexttx,
   8159 					(uint64_t)curaddr, curlen));
   8160 			}
   8161 		}
   8162 
   8163 		KASSERT(lasttx != -1);
   8164 
   8165 		/*
   8166 		 * Set up the command byte on the last descriptor of
   8167 		 * the packet. If we're in the interrupt delay window,
   8168 		 * delay the interrupt.
   8169 		 */
   8170 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8171 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8172 
   8173 		/*
   8174 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8175 		 * up the descriptor to encapsulate the packet for us.
   8176 		 *
   8177 		 * This is only valid on the last descriptor of the packet.
   8178 		 */
   8179 		if (vlan_has_tag(m0)) {
   8180 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8181 			    htole32(WTX_CMD_VLE);
   8182 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8183 			    = htole16(vlan_get_tag(m0));
   8184 		}
   8185 
   8186 		txs->txs_lastdesc = lasttx;
   8187 
   8188 		DPRINTF(sc, WM_DEBUG_TX,
   8189 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8190 			device_xname(sc->sc_dev),
   8191 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8192 
   8193 		/* Sync the descriptors we're using. */
   8194 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8195 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8196 
   8197 		/* Give the packet to the chip. */
   8198 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8199 
   8200 		DPRINTF(sc, WM_DEBUG_TX,
   8201 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8202 
   8203 		DPRINTF(sc, WM_DEBUG_TX,
   8204 		    ("%s: TX: finished transmitting packet, job %d\n",
   8205 			device_xname(sc->sc_dev), txq->txq_snext));
   8206 
   8207 		/* Advance the tx pointer. */
   8208 		txq->txq_free -= txs->txs_ndesc;
   8209 		txq->txq_next = nexttx;
   8210 
   8211 		txq->txq_sfree--;
   8212 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8213 
   8214 		/* Pass the packet to any BPF listeners. */
   8215 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8216 	}
   8217 
   8218 	if (m0 != NULL) {
   8219 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8220 		WM_Q_EVCNT_INCR(txq, descdrop);
   8221 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8222 			__func__));
   8223 		m_freem(m0);
   8224 	}
   8225 
   8226 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8227 		/* No more slots; notify upper layer. */
   8228 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8229 	}
   8230 
   8231 	if (txq->txq_free != ofree) {
   8232 		/* Set a watchdog timer in case the chip flakes out. */
   8233 		txq->txq_lastsent = time_uptime;
   8234 		txq->txq_sending = true;
   8235 	}
   8236 }
   8237 
   8238 /*
   8239  * wm_nq_tx_offload:
   8240  *
   8241  *	Set up TCP/IP checksumming parameters for the
   8242  *	specified packet, for NEWQUEUE devices
   8243  */
   8244 static void
   8245 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8246     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8247 {
   8248 	struct mbuf *m0 = txs->txs_mbuf;
   8249 	uint32_t vl_len, mssidx, cmdc;
   8250 	struct ether_header *eh;
   8251 	int offset, iphl;
   8252 
   8253 	/*
   8254 	 * XXX It would be nice if the mbuf pkthdr had offset
   8255 	 * fields for the protocol headers.
   8256 	 */
   8257 	*cmdlenp = 0;
   8258 	*fieldsp = 0;
   8259 
   8260 	eh = mtod(m0, struct ether_header *);
   8261 	switch (htons(eh->ether_type)) {
   8262 	case ETHERTYPE_IP:
   8263 	case ETHERTYPE_IPV6:
   8264 		offset = ETHER_HDR_LEN;
   8265 		break;
   8266 
   8267 	case ETHERTYPE_VLAN:
   8268 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8269 		break;
   8270 
   8271 	default:
   8272 		/* Don't support this protocol or encapsulation. */
   8273 		*do_csum = false;
   8274 		return;
   8275 	}
   8276 	*do_csum = true;
   8277 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8278 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8279 
   8280 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8281 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8282 
   8283 	if ((m0->m_pkthdr.csum_flags &
   8284 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8285 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8286 	} else {
   8287 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8288 	}
   8289 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8290 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8291 
   8292 	if (vlan_has_tag(m0)) {
   8293 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8294 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8295 		*cmdlenp |= NQTX_CMD_VLE;
   8296 	}
   8297 
   8298 	mssidx = 0;
   8299 
   8300 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8301 		int hlen = offset + iphl;
   8302 		int tcp_hlen;
   8303 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8304 
   8305 		if (__predict_false(m0->m_len <
   8306 				    (hlen + sizeof(struct tcphdr)))) {
   8307 			/*
   8308 			 * TCP/IP headers are not in the first mbuf; we need
   8309 			 * to do this the slow and painful way. Let's just
   8310 			 * hope this doesn't happen very often.
   8311 			 */
   8312 			struct tcphdr th;
   8313 
   8314 			WM_Q_EVCNT_INCR(txq, tsopain);
   8315 
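         			/*
         			 * For TSO, the IP total length field is
         			 * cleared and the TCP checksum field is seeded
         			 * with the pseudo-header checksum computed
         			 * over a zero length; the hardware fills in
         			 * the real length and checksum per segment.
         			 */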
   8316 			m_copydata(m0, hlen, sizeof(th), &th);
   8317 			if (v4) {
   8318 				struct ip ip;
   8319 
   8320 				m_copydata(m0, offset, sizeof(ip), &ip);
   8321 				ip.ip_len = 0;
   8322 				m_copyback(m0,
   8323 				    offset + offsetof(struct ip, ip_len),
   8324 				    sizeof(ip.ip_len), &ip.ip_len);
   8325 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8326 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8327 			} else {
   8328 				struct ip6_hdr ip6;
   8329 
   8330 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8331 				ip6.ip6_plen = 0;
   8332 				m_copyback(m0,
   8333 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8334 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8335 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8336 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8337 			}
   8338 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8339 			    sizeof(th.th_sum), &th.th_sum);
   8340 
   8341 			tcp_hlen = th.th_off << 2;
   8342 		} else {
   8343 			/*
   8344 			 * TCP/IP headers are in the first mbuf; we can do
   8345 			 * this the easy way.
   8346 			 */
   8347 			struct tcphdr *th;
   8348 
   8349 			if (v4) {
   8350 				struct ip *ip =
   8351 				    (void *)(mtod(m0, char *) + offset);
   8352 				th = (void *)(mtod(m0, char *) + hlen);
   8353 
   8354 				ip->ip_len = 0;
   8355 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8356 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8357 			} else {
   8358 				struct ip6_hdr *ip6 =
   8359 				    (void *)(mtod(m0, char *) + offset);
   8360 				th = (void *)(mtod(m0, char *) + hlen);
   8361 
   8362 				ip6->ip6_plen = 0;
   8363 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8364 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8365 			}
   8366 			tcp_hlen = th->th_off << 2;
   8367 		}
   8368 		hlen += tcp_hlen;
   8369 		*cmdlenp |= NQTX_CMD_TSE;
   8370 
   8371 		if (v4) {
   8372 			WM_Q_EVCNT_INCR(txq, tso);
   8373 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8374 		} else {
   8375 			WM_Q_EVCNT_INCR(txq, tso6);
   8376 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8377 		}
    8378 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    8379 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8380 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8381 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8382 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8383 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8384 	} else {
   8385 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8386 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8387 	}
   8388 
   8389 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8390 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8391 		cmdc |= NQTXC_CMD_IP4;
   8392 	}
   8393 
   8394 	if (m0->m_pkthdr.csum_flags &
   8395 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8396 		WM_Q_EVCNT_INCR(txq, tusum);
   8397 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8398 			cmdc |= NQTXC_CMD_TCP;
   8399 		else
   8400 			cmdc |= NQTXC_CMD_UDP;
   8401 
   8402 		cmdc |= NQTXC_CMD_IP4;
   8403 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8404 	}
   8405 	if (m0->m_pkthdr.csum_flags &
   8406 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8407 		WM_Q_EVCNT_INCR(txq, tusum6);
   8408 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8409 			cmdc |= NQTXC_CMD_TCP;
   8410 		else
   8411 			cmdc |= NQTXC_CMD_UDP;
   8412 
   8413 		cmdc |= NQTXC_CMD_IP6;
   8414 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8415 	}
   8416 
    8417 	/*
    8418 	 * We don't have to write a context descriptor for every packet on
    8419 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    8420 	 * I354, I210 and I211. Writing once per Tx queue is enough for
    8421 	 * these controllers.
    8422 	 * Writing a context descriptor for every packet adds overhead,
    8423 	 * but it does not cause problems.
    8424 	 */
   8425 	/* Fill in the context descriptor. */
   8426 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8427 	    htole32(vl_len);
   8428 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8429 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8430 	    htole32(cmdc);
   8431 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8432 	    htole32(mssidx);
   8433 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8434 	DPRINTF(sc, WM_DEBUG_TX,
   8435 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8436 		txq->txq_next, 0, vl_len));
   8437 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8438 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8439 	txs->txs_ndesc++;
   8440 }
   8441 
   8442 /*
   8443  * wm_nq_start:		[ifnet interface function]
   8444  *
   8445  *	Start packet transmission on the interface for NEWQUEUE devices
   8446  */
   8447 static void
   8448 wm_nq_start(struct ifnet *ifp)
   8449 {
   8450 	struct wm_softc *sc = ifp->if_softc;
   8451 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8452 
   8453 #ifdef WM_MPSAFE
   8454 	KASSERT(if_is_mpsafe(ifp));
   8455 #endif
   8456 	/*
   8457 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8458 	 */
   8459 
   8460 	mutex_enter(txq->txq_lock);
   8461 	if (!txq->txq_stopping)
   8462 		wm_nq_start_locked(ifp);
   8463 	mutex_exit(txq->txq_lock);
   8464 }
   8465 
   8466 static void
   8467 wm_nq_start_locked(struct ifnet *ifp)
   8468 {
   8469 	struct wm_softc *sc = ifp->if_softc;
   8470 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8471 
   8472 	wm_nq_send_common_locked(ifp, txq, false);
   8473 }
   8474 
   8475 static int
   8476 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8477 {
   8478 	int qid;
   8479 	struct wm_softc *sc = ifp->if_softc;
   8480 	struct wm_txqueue *txq;
   8481 
   8482 	qid = wm_select_txqueue(ifp, m);
   8483 	txq = &sc->sc_queue[qid].wmq_txq;
   8484 
   8485 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8486 		m_freem(m);
   8487 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8488 		return ENOBUFS;
   8489 	}
   8490 
   8491 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8492 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8493 	if (m->m_flags & M_MCAST)
   8494 		if_statinc_ref(nsr, if_omcasts);
   8495 	IF_STAT_PUTREF(ifp);
   8496 
    8497 	/*
    8498 	 * There are two situations in which this mutex_tryenter() can
    8499 	 * fail at run time:
    8500 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8501 	 *     (2) contention with the deferred if_start softint
    8502 	 *	   (wm_handle_queue())
    8503 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8504 	 * dequeued by wm_deferred_start_locked(), so it does not get
    8505 	 * stuck.  The same holds in case (2), where the packet is also
    8506 	 * dequeued by wm_deferred_start_locked().
    8507 	 */
   8508 	if (mutex_tryenter(txq->txq_lock)) {
   8509 		if (!txq->txq_stopping)
   8510 			wm_nq_transmit_locked(ifp, txq);
   8511 		mutex_exit(txq->txq_lock);
   8512 	}
   8513 
   8514 	return 0;
   8515 }
   8516 
   8517 static void
   8518 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8519 {
   8520 
   8521 	wm_nq_send_common_locked(ifp, txq, true);
   8522 }
   8523 
   8524 static void
   8525 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8526     bool is_transmit)
   8527 {
   8528 	struct wm_softc *sc = ifp->if_softc;
   8529 	struct mbuf *m0;
   8530 	struct wm_txsoft *txs;
   8531 	bus_dmamap_t dmamap;
   8532 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8533 	bool do_csum, sent;
   8534 	bool remap = true;
   8535 
   8536 	KASSERT(mutex_owned(txq->txq_lock));
   8537 
   8538 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8539 		return;
   8540 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8541 		return;
   8542 
   8543 	if (__predict_false(wm_linkdown_discard(txq))) {
   8544 		do {
   8545 			if (is_transmit)
   8546 				m0 = pcq_get(txq->txq_interq);
   8547 			else
   8548 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    8549 			/*
    8550 			 * Increment the success counter even though the
    8551 			 * packet is discarded because the PHY link is down.
    8552 			 */
   8553 			if (m0 != NULL)
   8554 				if_statinc(ifp, if_opackets);
   8555 			m_freem(m0);
   8556 		} while (m0 != NULL);
   8557 		return;
   8558 	}
   8559 
   8560 	sent = false;
   8561 
   8562 	/*
   8563 	 * Loop through the send queue, setting up transmit descriptors
   8564 	 * until we drain the queue, or use up all available transmit
   8565 	 * descriptors.
   8566 	 */
   8567 	for (;;) {
   8568 		m0 = NULL;
   8569 
   8570 		/* Get a work queue entry. */
   8571 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8572 			wm_txeof(txq, UINT_MAX);
   8573 			if (txq->txq_sfree == 0) {
   8574 				DPRINTF(sc, WM_DEBUG_TX,
   8575 				    ("%s: TX: no free job descriptors\n",
   8576 					device_xname(sc->sc_dev)));
   8577 				WM_Q_EVCNT_INCR(txq, txsstall);
   8578 				break;
   8579 			}
   8580 		}
   8581 
   8582 		/* Grab a packet off the queue. */
   8583 		if (is_transmit)
   8584 			m0 = pcq_get(txq->txq_interq);
   8585 		else
   8586 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8587 		if (m0 == NULL)
   8588 			break;
   8589 
   8590 		DPRINTF(sc, WM_DEBUG_TX,
   8591 		    ("%s: TX: have packet to transmit: %p\n",
   8592 		    device_xname(sc->sc_dev), m0));
   8593 
   8594 		txs = &txq->txq_soft[txq->txq_snext];
   8595 		dmamap = txs->txs_dmamap;
   8596 
   8597 		/*
   8598 		 * Load the DMA map.  If this fails, the packet either
   8599 		 * didn't fit in the allotted number of segments, or we
   8600 		 * were short on resources.  For the too-many-segments
   8601 		 * case, we simply report an error and drop the packet,
   8602 		 * since we can't sanely copy a jumbo packet to a single
   8603 		 * buffer.
   8604 		 */
   8605 retry:
   8606 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8607 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8608 		if (__predict_false(error)) {
   8609 			if (error == EFBIG) {
   8610 				if (remap == true) {
   8611 					struct mbuf *m;
   8612 
   8613 					remap = false;
   8614 					m = m_defrag(m0, M_NOWAIT);
   8615 					if (m != NULL) {
   8616 						WM_Q_EVCNT_INCR(txq, defrag);
   8617 						m0 = m;
   8618 						goto retry;
   8619 					}
   8620 				}
   8621 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8622 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8623 				    "DMA segments, dropping...\n",
   8624 				    device_xname(sc->sc_dev));
   8625 				wm_dump_mbuf_chain(sc, m0);
   8626 				m_freem(m0);
   8627 				continue;
   8628 			}
   8629 			/* Short on resources, just stop for now. */
   8630 			DPRINTF(sc, WM_DEBUG_TX,
   8631 			    ("%s: TX: dmamap load failed: %d\n",
   8632 				device_xname(sc->sc_dev), error));
   8633 			break;
   8634 		}
   8635 
   8636 		segs_needed = dmamap->dm_nsegs;
   8637 
   8638 		/*
   8639 		 * Ensure we have enough descriptors free to describe
   8640 		 * the packet. Note, we always reserve one descriptor
   8641 		 * at the end of the ring due to the semantics of the
   8642 		 * TDT register, plus one more in the event we need
   8643 		 * to load offload context.
   8644 		 */
   8645 		if (segs_needed > txq->txq_free - 2) {
   8646 			/*
   8647 			 * Not enough free descriptors to transmit this
   8648 			 * packet.  We haven't committed anything yet,
   8649 			 * so just unload the DMA map, put the packet
    8650 			 * back on the queue, and punt. Notify the upper
   8651 			 * layer that there are no more slots left.
   8652 			 */
   8653 			DPRINTF(sc, WM_DEBUG_TX,
   8654 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8655 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8656 				segs_needed, txq->txq_free - 1));
   8657 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8658 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8659 			WM_Q_EVCNT_INCR(txq, txdstall);
   8660 			break;
   8661 		}
   8662 
   8663 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8664 
   8665 		DPRINTF(sc, WM_DEBUG_TX,
   8666 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8667 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8668 
   8669 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8670 
   8671 		/*
   8672 		 * Store a pointer to the packet so that we can free it
   8673 		 * later.
   8674 		 *
   8675 		 * Initially, we consider the number of descriptors the
    8676 		 * packet uses to be the number of DMA segments.  This may be
   8677 		 * incremented by 1 if we do checksum offload (a descriptor
   8678 		 * is used to set the checksum context).
   8679 		 */
   8680 		txs->txs_mbuf = m0;
   8681 		txs->txs_firstdesc = txq->txq_next;
   8682 		txs->txs_ndesc = segs_needed;
   8683 
   8684 		/* Set up offload parameters for this packet. */
   8685 		uint32_t cmdlen, fields, dcmdlen;
   8686 		if (m0->m_pkthdr.csum_flags &
   8687 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8688 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8689 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8690 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8691 			    &do_csum);
   8692 		} else {
   8693 			do_csum = false;
   8694 			cmdlen = 0;
   8695 			fields = 0;
   8696 		}
   8697 
   8698 		/* Sync the DMA map. */
   8699 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8700 		    BUS_DMASYNC_PREWRITE);
   8701 
   8702 		/* Initialize the first transmit descriptor. */
   8703 		nexttx = txq->txq_next;
   8704 		if (!do_csum) {
   8705 			/* Setup a legacy descriptor */
   8706 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8707 			    dmamap->dm_segs[0].ds_addr);
   8708 			txq->txq_descs[nexttx].wtx_cmdlen =
   8709 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8710 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8711 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8712 			if (vlan_has_tag(m0)) {
   8713 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8714 				    htole32(WTX_CMD_VLE);
   8715 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8716 				    htole16(vlan_get_tag(m0));
   8717 			} else
   8718 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8719 
   8720 			dcmdlen = 0;
   8721 		} else {
   8722 			/* Setup an advanced data descriptor */
   8723 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8724 			    htole64(dmamap->dm_segs[0].ds_addr);
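         			/*
         			 * The segment length shares the cmdlen word
         			 * with the command bits; check that the two
         			 * do not collide before OR'ing them together.
         			 */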
   8725 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8726 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8727 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8728 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8729 			    htole32(fields);
   8730 			DPRINTF(sc, WM_DEBUG_TX,
   8731 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8732 				device_xname(sc->sc_dev), nexttx,
   8733 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8734 			DPRINTF(sc, WM_DEBUG_TX,
   8735 			    ("\t 0x%08x%08x\n", fields,
   8736 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8737 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8738 		}
   8739 
   8740 		lasttx = nexttx;
   8741 		nexttx = WM_NEXTTX(txq, nexttx);
    8742 		/*
    8743 		 * Fill in the next descriptors. The legacy and advanced
    8744 		 * formats are the same from here on.
    8745 		 */
   8746 		for (seg = 1; seg < dmamap->dm_nsegs;
   8747 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8748 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8749 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8751 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8752 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8753 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8754 			lasttx = nexttx;
   8755 
   8756 			DPRINTF(sc, WM_DEBUG_TX,
   8757 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8758 				device_xname(sc->sc_dev), nexttx,
   8759 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8760 				dmamap->dm_segs[seg].ds_len));
   8761 		}
   8762 
   8763 		KASSERT(lasttx != -1);
   8764 
   8765 		/*
   8766 		 * Set up the command byte on the last descriptor of
   8767 		 * the packet. If we're in the interrupt delay window,
   8768 		 * delay the interrupt.
   8769 		 */
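         		/*
         		 * EOP and RS occupy the same bit positions in the
         		 * legacy and advanced formats (asserted below), so
         		 * the legacy wtx_cmdlen field works for either
         		 * descriptor type.
         		 */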
   8770 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8771 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8772 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8773 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8774 
   8775 		txs->txs_lastdesc = lasttx;
   8776 
   8777 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8778 		    device_xname(sc->sc_dev),
   8779 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8780 
   8781 		/* Sync the descriptors we're using. */
   8782 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8783 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8784 
   8785 		/* Give the packet to the chip. */
   8786 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8787 		sent = true;
   8788 
   8789 		DPRINTF(sc, WM_DEBUG_TX,
   8790 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8791 
   8792 		DPRINTF(sc, WM_DEBUG_TX,
   8793 		    ("%s: TX: finished transmitting packet, job %d\n",
   8794 			device_xname(sc->sc_dev), txq->txq_snext));
   8795 
   8796 		/* Advance the tx pointer. */
   8797 		txq->txq_free -= txs->txs_ndesc;
   8798 		txq->txq_next = nexttx;
   8799 
   8800 		txq->txq_sfree--;
   8801 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8802 
   8803 		/* Pass the packet to any BPF listeners. */
   8804 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8805 	}
   8806 
   8807 	if (m0 != NULL) {
   8808 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8809 		WM_Q_EVCNT_INCR(txq, descdrop);
   8810 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8811 			__func__));
   8812 		m_freem(m0);
   8813 	}
   8814 
   8815 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8816 		/* No more slots; notify upper layer. */
   8817 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8818 	}
   8819 
   8820 	if (sent) {
   8821 		/* Set a watchdog timer in case the chip flakes out. */
   8822 		txq->txq_lastsent = time_uptime;
   8823 		txq->txq_sending = true;
   8824 	}
   8825 }
   8826 
   8827 static void
   8828 wm_deferred_start_locked(struct wm_txqueue *txq)
   8829 {
   8830 	struct wm_softc *sc = txq->txq_sc;
   8831 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8832 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8833 	int qid = wmq->wmq_id;
   8834 
   8835 	KASSERT(mutex_owned(txq->txq_lock));
   8836 
    8837 	/* The caller holds txq_lock and is responsible for releasing it. */
    8838 	if (txq->txq_stopping)
    8839 		return;
   8841 
   8842 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8843 		/* XXX needed for ALTQ or single-CPU systems */
   8844 		if (qid == 0)
   8845 			wm_nq_start_locked(ifp);
   8846 		wm_nq_transmit_locked(ifp, txq);
   8847 	} else {
    8848 		/* XXX needed for ALTQ or single-CPU systems */
   8849 		if (qid == 0)
   8850 			wm_start_locked(ifp);
   8851 		wm_transmit_locked(ifp, txq);
   8852 	}
   8853 }
   8854 
   8855 /* Interrupt */
   8856 
   8857 /*
   8858  * wm_txeof:
   8859  *
   8860  *	Helper; handle transmit interrupts.
   8861  */
   8862 static bool
   8863 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8864 {
   8865 	struct wm_softc *sc = txq->txq_sc;
   8866 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8867 	struct wm_txsoft *txs;
   8868 	int count = 0;
   8869 	int i;
   8870 	uint8_t status;
   8871 	bool more = false;
   8872 
   8873 	KASSERT(mutex_owned(txq->txq_lock));
   8874 
   8875 	if (txq->txq_stopping)
   8876 		return false;
   8877 
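         	/*
         	 * Clear the NO_SPACE flag before reclaiming: descriptors
         	 * freed below make room, and the send path sets the flag
         	 * again if the ring is still full.
         	 */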
   8878 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8879 
   8880 	/*
   8881 	 * Go through the Tx list and free mbufs for those
   8882 	 * frames which have been transmitted.
   8883 	 */
   8884 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8885 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8886 		if (limit-- == 0) {
   8887 			more = true;
   8888 			DPRINTF(sc, WM_DEBUG_TX,
   8889 			    ("%s: TX: loop limited, job %d is not processed\n",
   8890 				device_xname(sc->sc_dev), i));
   8891 			break;
   8892 		}
   8893 
   8894 		txs = &txq->txq_soft[i];
   8895 
   8896 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8897 			device_xname(sc->sc_dev), i));
   8898 
   8899 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8900 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8901 
   8902 		status =
   8903 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8904 		if ((status & WTX_ST_DD) == 0) {
   8905 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8906 			    BUS_DMASYNC_PREREAD);
   8907 			break;
   8908 		}
   8909 
   8910 		count++;
   8911 		DPRINTF(sc, WM_DEBUG_TX,
   8912 		    ("%s: TX: job %d done: descs %d..%d\n",
   8913 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8914 		    txs->txs_lastdesc));
   8915 
   8916 		/*
   8917 		 * XXX We should probably be using the statistics
   8918 		 * XXX registers, but I don't know if they exist
   8919 		 * XXX on chips before the i82544.
   8920 		 */
   8921 
   8922 #ifdef WM_EVENT_COUNTERS
   8923 		if (status & WTX_ST_TU)
   8924 			WM_Q_EVCNT_INCR(txq, underrun);
   8925 #endif /* WM_EVENT_COUNTERS */
   8926 
    8927 		/*
    8928 		 * Documents for the 82574 and newer say the status field has
    8929 		 * neither the EC (Excessive Collision) nor the LC (Late
    8930 		 * Collision) bit (both reserved); see the "PCIe GbE Controller
    8931 		 * Open Source Software Developer's Manual" and the datasheets.
    8932 		 *
    8933 		 * XXX The LC bit was seen set on an I218 even though the media
    8934 		 * was full duplex, so the bit might have some other meaning
    8935 		 * (no documentation found).
    8936 		 */
   8937 
   8938 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8939 		    && ((sc->sc_type < WM_T_82574)
   8940 			|| (sc->sc_type == WM_T_80003))) {
   8941 			if_statinc(ifp, if_oerrors);
   8942 			if (status & WTX_ST_LC)
   8943 				log(LOG_WARNING, "%s: late collision\n",
   8944 				    device_xname(sc->sc_dev));
   8945 			else if (status & WTX_ST_EC) {
   8946 				if_statadd(ifp, if_collisions,
   8947 				    TX_COLLISION_THRESHOLD + 1);
   8948 				log(LOG_WARNING, "%s: excessive collisions\n",
   8949 				    device_xname(sc->sc_dev));
   8950 			}
   8951 		} else
   8952 			if_statinc(ifp, if_opackets);
   8953 
   8954 		txq->txq_packets++;
   8955 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8956 
   8957 		txq->txq_free += txs->txs_ndesc;
   8958 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8959 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8960 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8961 		m_freem(txs->txs_mbuf);
   8962 		txs->txs_mbuf = NULL;
   8963 	}
   8964 
   8965 	/* Update the dirty transmit buffer pointer. */
   8966 	txq->txq_sdirty = i;
   8967 	DPRINTF(sc, WM_DEBUG_TX,
   8968 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8969 
   8970 	if (count != 0)
   8971 		rnd_add_uint32(&sc->rnd_source, count);
   8972 
   8973 	/*
   8974 	 * If there are no more pending transmissions, cancel the watchdog
   8975 	 * timer.
   8976 	 */
   8977 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8978 		txq->txq_sending = false;
   8979 
   8980 	return more;
   8981 }
   8982 
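         /*
          * Rx descriptor accessors.  Three descriptor layouts are in use:
          * the 82574 uses the extended format, NEWQUEUE devices (82575 and
          * newer) use the advanced format, and everything else uses the
          * legacy format.  These helpers hide the differences.
          */
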
   8983 static inline uint32_t
   8984 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8985 {
   8986 	struct wm_softc *sc = rxq->rxq_sc;
   8987 
   8988 	if (sc->sc_type == WM_T_82574)
   8989 		return EXTRXC_STATUS(
   8990 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8991 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8992 		return NQRXC_STATUS(
   8993 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8994 	else
   8995 		return rxq->rxq_descs[idx].wrx_status;
   8996 }
   8997 
   8998 static inline uint32_t
   8999 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9000 {
   9001 	struct wm_softc *sc = rxq->rxq_sc;
   9002 
   9003 	if (sc->sc_type == WM_T_82574)
   9004 		return EXTRXC_ERROR(
   9005 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9006 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9007 		return NQRXC_ERROR(
   9008 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9009 	else
   9010 		return rxq->rxq_descs[idx].wrx_errors;
   9011 }
   9012 
   9013 static inline uint16_t
   9014 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9015 {
   9016 	struct wm_softc *sc = rxq->rxq_sc;
   9017 
   9018 	if (sc->sc_type == WM_T_82574)
   9019 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9020 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9021 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9022 	else
   9023 		return rxq->rxq_descs[idx].wrx_special;
   9024 }
   9025 
   9026 static inline int
   9027 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9028 {
   9029 	struct wm_softc *sc = rxq->rxq_sc;
   9030 
   9031 	if (sc->sc_type == WM_T_82574)
   9032 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9033 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9034 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9035 	else
   9036 		return rxq->rxq_descs[idx].wrx_len;
   9037 }
   9038 
   9039 #ifdef WM_DEBUG
   9040 static inline uint32_t
   9041 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9042 {
   9043 	struct wm_softc *sc = rxq->rxq_sc;
   9044 
   9045 	if (sc->sc_type == WM_T_82574)
   9046 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9047 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9048 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9049 	else
   9050 		return 0;
   9051 }
   9052 
   9053 static inline uint8_t
   9054 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9055 {
   9056 	struct wm_softc *sc = rxq->rxq_sc;
   9057 
   9058 	if (sc->sc_type == WM_T_82574)
   9059 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9060 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9061 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9062 	else
   9063 		return 0;
   9064 }
   9065 #endif /* WM_DEBUG */
   9066 
   9067 static inline bool
   9068 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9069     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9070 {
   9071 
   9072 	if (sc->sc_type == WM_T_82574)
   9073 		return (status & ext_bit) != 0;
   9074 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9075 		return (status & nq_bit) != 0;
   9076 	else
   9077 		return (status & legacy_bit) != 0;
   9078 }
   9079 
   9080 static inline bool
   9081 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9082     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9083 {
   9084 
   9085 	if (sc->sc_type == WM_T_82574)
   9086 		return (error & ext_bit) != 0;
   9087 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9088 		return (error & nq_bit) != 0;
   9089 	else
   9090 		return (error & legacy_bit) != 0;
   9091 }
   9092 
   9093 static inline bool
   9094 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9095 {
   9096 
   9097 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9098 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9099 		return true;
   9100 	else
   9101 		return false;
   9102 }
   9103 
   9104 static inline bool
   9105 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9106 {
   9107 	struct wm_softc *sc = rxq->rxq_sc;
   9108 
   9109 	/* XXX missing error bit for newqueue? */
   9110 	if (wm_rxdesc_is_set_error(sc, errors,
   9111 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9112 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9113 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9114 		NQRXC_ERROR_RXE)) {
   9115 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9116 		    EXTRXC_ERROR_SE, 0))
   9117 			log(LOG_WARNING, "%s: symbol error\n",
   9118 			    device_xname(sc->sc_dev));
   9119 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9120 		    EXTRXC_ERROR_SEQ, 0))
   9121 			log(LOG_WARNING, "%s: receive sequence error\n",
   9122 			    device_xname(sc->sc_dev));
   9123 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9124 		    EXTRXC_ERROR_CE, 0))
   9125 			log(LOG_WARNING, "%s: CRC error\n",
   9126 			    device_xname(sc->sc_dev));
   9127 		return true;
   9128 	}
   9129 
   9130 	return false;
   9131 }
   9132 
   9133 static inline bool
   9134 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9135 {
   9136 	struct wm_softc *sc = rxq->rxq_sc;
   9137 
   9138 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9139 		NQRXC_STATUS_DD)) {
   9140 		/* We have processed all of the receive descriptors. */
   9141 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9142 		return false;
   9143 	}
   9144 
   9145 	return true;
   9146 }
   9147 
   9148 static inline bool
   9149 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9150     uint16_t vlantag, struct mbuf *m)
   9151 {
   9152 
   9153 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9154 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9155 		vlan_set_tag(m, le16toh(vlantag));
   9156 	}
   9157 
   9158 	return true;
   9159 }
   9160 
   9161 static inline void
   9162 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9163     uint32_t errors, struct mbuf *m)
   9164 {
   9165 	struct wm_softc *sc = rxq->rxq_sc;
   9166 
   9167 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9168 		if (wm_rxdesc_is_set_status(sc, status,
   9169 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9170 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9171 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9172 			if (wm_rxdesc_is_set_error(sc, errors,
   9173 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9174 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9175 		}
   9176 		if (wm_rxdesc_is_set_status(sc, status,
   9177 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9178 			/*
   9179 			 * Note: we don't know if this was TCP or UDP,
   9180 			 * so we just set both bits, and expect the
   9181 			 * upper layers to deal.
   9182 			 */
   9183 			WM_Q_EVCNT_INCR(rxq, tusum);
   9184 			m->m_pkthdr.csum_flags |=
   9185 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9186 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9187 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9188 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9189 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9190 		}
   9191 	}
   9192 }
   9193 
   9194 /*
   9195  * wm_rxeof:
   9196  *
   9197  *	Helper; handle receive interrupts.
   9198  */
   9199 static bool
   9200 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9201 {
   9202 	struct wm_softc *sc = rxq->rxq_sc;
   9203 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9204 	struct wm_rxsoft *rxs;
   9205 	struct mbuf *m;
   9206 	int i, len;
   9207 	int count = 0;
   9208 	uint32_t status, errors;
   9209 	uint16_t vlantag;
   9210 	bool more = false;
   9211 
   9212 	KASSERT(mutex_owned(rxq->rxq_lock));
   9213 
   9214 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9215 		if (limit-- == 0) {
   9216 			more = true;
   9217 			DPRINTF(sc, WM_DEBUG_RX,
   9218 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9219 				device_xname(sc->sc_dev), i));
   9220 			break;
   9221 		}
   9222 
   9223 		rxs = &rxq->rxq_soft[i];
   9224 
   9225 		DPRINTF(sc, WM_DEBUG_RX,
   9226 		    ("%s: RX: checking descriptor %d\n",
   9227 			device_xname(sc->sc_dev), i));
   9228 		wm_cdrxsync(rxq, i,
   9229 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9230 
   9231 		status = wm_rxdesc_get_status(rxq, i);
   9232 		errors = wm_rxdesc_get_errors(rxq, i);
   9233 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9234 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9235 #ifdef WM_DEBUG
   9236 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9237 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9238 #endif
   9239 
   9240 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9241 			break;
   9242 		}
   9243 
   9244 		count++;
   9245 		if (__predict_false(rxq->rxq_discard)) {
   9246 			DPRINTF(sc, WM_DEBUG_RX,
   9247 			    ("%s: RX: discarding contents of descriptor %d\n",
   9248 				device_xname(sc->sc_dev), i));
   9249 			wm_init_rxdesc(rxq, i);
   9250 			if (wm_rxdesc_is_eop(rxq, status)) {
   9251 				/* Reset our state. */
   9252 				DPRINTF(sc, WM_DEBUG_RX,
   9253 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9254 					device_xname(sc->sc_dev)));
   9255 				rxq->rxq_discard = 0;
   9256 			}
   9257 			continue;
   9258 		}
   9259 
   9260 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9261 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9262 
   9263 		m = rxs->rxs_mbuf;
   9264 
   9265 		/*
   9266 		 * Add a new receive buffer to the ring, unless of
   9267 		 * course the length is zero. Treat the latter as a
   9268 		 * failed mapping.
   9269 		 */
   9270 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9271 			/*
   9272 			 * Failed, throw away what we've done so
   9273 			 * far, and discard the rest of the packet.
   9274 			 */
   9275 			if_statinc(ifp, if_ierrors);
   9276 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9277 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9278 			wm_init_rxdesc(rxq, i);
   9279 			if (!wm_rxdesc_is_eop(rxq, status))
   9280 				rxq->rxq_discard = 1;
   9281 			if (rxq->rxq_head != NULL)
   9282 				m_freem(rxq->rxq_head);
   9283 			WM_RXCHAIN_RESET(rxq);
   9284 			DPRINTF(sc, WM_DEBUG_RX,
   9285 			    ("%s: RX: Rx buffer allocation failed, "
   9286 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9287 				rxq->rxq_discard ? " (discard)" : ""));
   9288 			continue;
   9289 		}
   9290 
   9291 		m->m_len = len;
   9292 		rxq->rxq_len += len;
   9293 		DPRINTF(sc, WM_DEBUG_RX,
   9294 		    ("%s: RX: buffer at %p len %d\n",
   9295 			device_xname(sc->sc_dev), m->m_data, len));
   9296 
   9297 		/* If this is not the end of the packet, keep looking. */
   9298 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9299 			WM_RXCHAIN_LINK(rxq, m);
   9300 			DPRINTF(sc, WM_DEBUG_RX,
   9301 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9302 				device_xname(sc->sc_dev), rxq->rxq_len));
   9303 			continue;
   9304 		}
   9305 
    9306 		/*
    9307 		 * Okay, we have the entire packet now. The chip is
    9308 		 * configured to include the FCS (not all chips can be
    9309 		 * configured to strip it), so we normally need to trim it.
    9310 		 * The I35[04] and I21[01] are exceptions: due to an erratum
    9311 		 * the RCTL_SECRC bit in their RCTL register is always set,
    9312 		 * so the FCS is never included. PCH2 and newer chips also do
    9313 		 * not include the FCS when jumbo frames are used, to work
    9314 		 * around an errata. We may need to adjust the length of the
    9315 		 * previous mbuf in the chain if the current mbuf is too short.
    9316 		 */
   9317 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9318 			if (m->m_len < ETHER_CRC_LEN) {
   9319 				rxq->rxq_tail->m_len
   9320 				    -= (ETHER_CRC_LEN - m->m_len);
   9321 				m->m_len = 0;
   9322 			} else
   9323 				m->m_len -= ETHER_CRC_LEN;
   9324 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9325 		} else
   9326 			len = rxq->rxq_len;
   9327 
   9328 		WM_RXCHAIN_LINK(rxq, m);
   9329 
   9330 		*rxq->rxq_tailp = NULL;
   9331 		m = rxq->rxq_head;
   9332 
   9333 		WM_RXCHAIN_RESET(rxq);
   9334 
   9335 		DPRINTF(sc, WM_DEBUG_RX,
   9336 		    ("%s: RX: have entire packet, len -> %d\n",
   9337 			device_xname(sc->sc_dev), len));
   9338 
   9339 		/* If an error occurred, update stats and drop the packet. */
   9340 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9341 			m_freem(m);
   9342 			continue;
   9343 		}
   9344 
   9345 		/* No errors.  Receive the packet. */
   9346 		m_set_rcvif(m, ifp);
   9347 		m->m_pkthdr.len = len;
    9348 		/*
    9349 		 * TODO
    9350 		 * The rsshash and rsstype should be saved in this mbuf.
    9351 		 */
   9352 		DPRINTF(sc, WM_DEBUG_RX,
   9353 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9354 			device_xname(sc->sc_dev), rsstype, rsshash));
   9355 
   9356 		/*
   9357 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9358 		 * for us.  Associate the tag with the packet.
   9359 		 */
   9360 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9361 			continue;
   9362 
   9363 		/* Set up checksum info for this packet. */
   9364 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9365 
   9366 		rxq->rxq_packets++;
   9367 		rxq->rxq_bytes += len;
   9368 		/* Pass it on. */
   9369 		if_percpuq_enqueue(sc->sc_ipq, m);
   9370 
   9371 		if (rxq->rxq_stopping)
   9372 			break;
   9373 	}
   9374 	rxq->rxq_ptr = i;
   9375 
   9376 	if (count != 0)
   9377 		rnd_add_uint32(&sc->rnd_source, count);
   9378 
   9379 	DPRINTF(sc, WM_DEBUG_RX,
   9380 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9381 
   9382 	return more;
   9383 }
   9384 
   9385 /*
   9386  * wm_linkintr_gmii:
   9387  *
   9388  *	Helper; handle link interrupts for GMII.
   9389  */
   9390 static void
   9391 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9392 {
   9393 	device_t dev = sc->sc_dev;
   9394 	uint32_t status, reg;
   9395 	bool link;
   9396 	int rv;
   9397 
   9398 	KASSERT(WM_CORE_LOCKED(sc));
   9399 
   9400 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9401 		__func__));
   9402 
   9403 	if ((icr & ICR_LSC) == 0) {
   9404 		if (icr & ICR_RXSEQ)
   9405 			DPRINTF(sc, WM_DEBUG_LINK,
   9406 			    ("%s: LINK Receive sequence error\n",
   9407 				device_xname(dev)));
   9408 		return;
   9409 	}
   9410 
   9411 	/* Link status changed */
   9412 	status = CSR_READ(sc, WMREG_STATUS);
   9413 	link = status & STATUS_LU;
   9414 	if (link) {
   9415 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9416 			device_xname(dev),
   9417 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9418 		if (wm_phy_need_linkdown_discard(sc))
   9419 			wm_clear_linkdown_discard(sc);
   9420 	} else {
   9421 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9422 			device_xname(dev)));
   9423 		if (wm_phy_need_linkdown_discard(sc))
   9424 			wm_set_linkdown_discard(sc);
   9425 	}
   9426 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9427 		wm_gig_downshift_workaround_ich8lan(sc);
   9428 
   9429 	if ((sc->sc_type == WM_T_ICH8)
   9430 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9431 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9432 	}
   9433 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9434 		device_xname(dev)));
   9435 	mii_pollstat(&sc->sc_mii);
   9436 	if (sc->sc_type == WM_T_82543) {
   9437 		int miistatus, active;
   9438 
   9439 		/*
   9440 		 * With 82543, we need to force speed and
   9441 		 * duplex on the MAC equal to what the PHY
   9442 		 * speed and duplex configuration is.
   9443 		 */
   9444 		miistatus = sc->sc_mii.mii_media_status;
   9445 
   9446 		if (miistatus & IFM_ACTIVE) {
   9447 			active = sc->sc_mii.mii_media_active;
   9448 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9449 			switch (IFM_SUBTYPE(active)) {
   9450 			case IFM_10_T:
   9451 				sc->sc_ctrl |= CTRL_SPEED_10;
   9452 				break;
   9453 			case IFM_100_TX:
   9454 				sc->sc_ctrl |= CTRL_SPEED_100;
   9455 				break;
   9456 			case IFM_1000_T:
   9457 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9458 				break;
   9459 			default:
    9460 				/*
    9461 				 * Fiber?
    9462 				 * Should not enter here.
    9463 				 */
   9464 				device_printf(dev, "unknown media (%x)\n",
   9465 				    active);
   9466 				break;
   9467 			}
   9468 			if (active & IFM_FDX)
   9469 				sc->sc_ctrl |= CTRL_FD;
   9470 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9471 		}
   9472 	} else if (sc->sc_type == WM_T_PCH) {
   9473 		wm_k1_gig_workaround_hv(sc,
   9474 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9475 	}
   9476 
   9477 	/*
   9478 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9479 	 * aggressive resulting in many collisions. To avoid this, increase
   9480 	 * the IPG and reduce Rx latency in the PHY.
   9481 	 */
   9482 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9483 	    && link) {
   9484 		uint32_t tipg_reg;
   9485 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9486 		bool fdx;
   9487 		uint16_t emi_addr, emi_val;
   9488 
   9489 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9490 		tipg_reg &= ~TIPG_IPGT_MASK;
   9491 		fdx = status & STATUS_FD;
   9492 
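		/*
		 * TIPG.IPGT is the back-to-back transmit IPG: use the
		 * maximum gap (0xff) at 10Mb/s half-duplex; 0x08 is the
		 * default. emi_val selects reduced (0) or normal (1)
		 * Rx latency in the PHY.
		 */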
   9493 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9494 			tipg_reg |= 0xff;
   9495 			/* Reduce Rx latency in analog PHY */
   9496 			emi_val = 0;
   9497 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9498 		    fdx && speed != STATUS_SPEED_1000) {
   9499 			tipg_reg |= 0xc;
   9500 			emi_val = 1;
   9501 		} else {
    9502 			/* Roll back to the default values */
   9503 			tipg_reg |= 0x08;
   9504 			emi_val = 1;
   9505 		}
   9506 
   9507 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9508 
   9509 		rv = sc->phy.acquire(sc);
   9510 		if (rv)
   9511 			return;
   9512 
   9513 		if (sc->sc_type == WM_T_PCH2)
   9514 			emi_addr = I82579_RX_CONFIG;
   9515 		else
   9516 			emi_addr = I217_RX_CONFIG;
   9517 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9518 
   9519 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9520 			uint16_t phy_reg;
   9521 
   9522 			sc->phy.readreg_locked(dev, 2,
   9523 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9524 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9525 			if (speed == STATUS_SPEED_100
   9526 			    || speed == STATUS_SPEED_10)
   9527 				phy_reg |= 0x3e8;
   9528 			else
   9529 				phy_reg |= 0xfa;
   9530 			sc->phy.writereg_locked(dev, 2,
   9531 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9532 
   9533 			if (speed == STATUS_SPEED_1000) {
   9534 				sc->phy.readreg_locked(dev, 2,
   9535 				    HV_PM_CTRL, &phy_reg);
   9536 
   9537 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9538 
   9539 				sc->phy.writereg_locked(dev, 2,
   9540 				    HV_PM_CTRL, phy_reg);
   9541 			}
   9542 		}
   9543 		sc->phy.release(sc);
   9544 
   9545 		if (rv)
   9546 			return;
   9547 
   9548 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9549 			uint16_t data, ptr_gap;
   9550 
   9551 			if (speed == STATUS_SPEED_1000) {
   9552 				rv = sc->phy.acquire(sc);
   9553 				if (rv)
   9554 					return;
   9555 
   9556 				rv = sc->phy.readreg_locked(dev, 2,
   9557 				    I82579_UNKNOWN1, &data);
   9558 				if (rv) {
   9559 					sc->phy.release(sc);
   9560 					return;
   9561 				}
   9562 
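				/*
				 * The gap pointer occupies bits 11:2;
				 * enforce a minimum value of 0x18.
				 */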
   9563 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9564 				if (ptr_gap < 0x18) {
   9565 					data &= ~(0x3ff << 2);
   9566 					data |= (0x18 << 2);
   9567 					rv = sc->phy.writereg_locked(dev,
   9568 					    2, I82579_UNKNOWN1, data);
   9569 				}
   9570 				sc->phy.release(sc);
   9571 				if (rv)
   9572 					return;
   9573 			} else {
   9574 				rv = sc->phy.acquire(sc);
   9575 				if (rv)
   9576 					return;
   9577 
   9578 				rv = sc->phy.writereg_locked(dev, 2,
   9579 				    I82579_UNKNOWN1, 0xc023);
   9580 				sc->phy.release(sc);
   9581 				if (rv)
   9582 					return;
   9583 
   9584 			}
   9585 		}
   9586 	}
   9587 
    9588 	/*
    9589 	 * Work around the I217 packet loss issue:
    9590 	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    9591 	 * on power up.
    9592 	 * Set the Beacon Duration for I217 to 8 usec.
    9593 	 */
   9594 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9595 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9596 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9597 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9598 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9599 	}
   9600 
    9601 	/* Work around the I218 hang issue */
   9602 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9603 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9604 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9605 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9606 		wm_k1_workaround_lpt_lp(sc, link);
   9607 
   9608 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9609 		/*
   9610 		 * Set platform power management values for Latency
   9611 		 * Tolerance Reporting (LTR)
   9612 		 */
   9613 		wm_platform_pm_pch_lpt(sc,
   9614 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9615 	}
   9616 
   9617 	/* Clear link partner's EEE ability */
   9618 	sc->eee_lp_ability = 0;
   9619 
   9620 	/* FEXTNVM6 K1-off workaround */
   9621 	if (sc->sc_type == WM_T_PCH_SPT) {
   9622 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9623 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9624 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9625 		else
   9626 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9627 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9628 	}
   9629 
   9630 	if (!link)
   9631 		return;
   9632 
   9633 	switch (sc->sc_type) {
   9634 	case WM_T_PCH2:
   9635 		wm_k1_workaround_lv(sc);
   9636 		/* FALLTHROUGH */
   9637 	case WM_T_PCH:
   9638 		if (sc->sc_phytype == WMPHY_82578)
   9639 			wm_link_stall_workaround_hv(sc);
   9640 		break;
   9641 	default:
   9642 		break;
   9643 	}
   9644 
   9645 	/* Enable/Disable EEE after link up */
   9646 	if (sc->sc_phytype > WMPHY_82579)
   9647 		wm_set_eee_pchlan(sc);
   9648 }
   9649 
   9650 /*
   9651  * wm_linkintr_tbi:
   9652  *
   9653  *	Helper; handle link interrupts for TBI mode.
   9654  */
   9655 static void
   9656 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9657 {
   9658 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9659 	uint32_t status;
   9660 
   9661 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9662 		__func__));
   9663 
   9664 	status = CSR_READ(sc, WMREG_STATUS);
   9665 	if (icr & ICR_LSC) {
   9666 		wm_check_for_link(sc);
   9667 		if (status & STATUS_LU) {
   9668 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9669 				device_xname(sc->sc_dev),
   9670 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9671 			/*
   9672 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9673 			 * so we should update sc->sc_ctrl
   9674 			 */
   9675 
   9676 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9677 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9678 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9679 			if (status & STATUS_FD)
   9680 				sc->sc_tctl |=
   9681 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9682 			else
   9683 				sc->sc_tctl |=
   9684 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9685 			if (sc->sc_ctrl & CTRL_TFCE)
   9686 				sc->sc_fcrtl |= FCRTL_XONE;
   9687 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9688 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9689 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9690 			sc->sc_tbi_linkup = 1;
   9691 			if_link_state_change(ifp, LINK_STATE_UP);
   9692 		} else {
   9693 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9694 				device_xname(sc->sc_dev)));
   9695 			sc->sc_tbi_linkup = 0;
   9696 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9697 		}
   9698 		/* Update LED */
   9699 		wm_tbi_serdes_set_linkled(sc);
   9700 	} else if (icr & ICR_RXSEQ)
   9701 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9702 			device_xname(sc->sc_dev)));
   9703 }
   9704 
    9705 /*
    9706  * wm_linkintr_serdes:
    9707  *
    9708  *	Helper; handle link interrupts for SERDES mode.
    9709  */
   9710 static void
   9711 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9712 {
   9713 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9714 	struct mii_data *mii = &sc->sc_mii;
   9715 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9716 	uint32_t pcs_adv, pcs_lpab, reg;
   9717 
   9718 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9719 		__func__));
   9720 
   9721 	if (icr & ICR_LSC) {
   9722 		/* Check PCS */
   9723 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9724 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9725 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9726 				device_xname(sc->sc_dev)));
   9727 			mii->mii_media_status |= IFM_ACTIVE;
   9728 			sc->sc_tbi_linkup = 1;
   9729 			if_link_state_change(ifp, LINK_STATE_UP);
   9730 		} else {
   9731 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9732 				device_xname(sc->sc_dev)));
   9733 			mii->mii_media_status |= IFM_NONE;
   9734 			sc->sc_tbi_linkup = 0;
   9735 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9736 			wm_tbi_serdes_set_linkled(sc);
   9737 			return;
   9738 		}
   9739 		mii->mii_media_active |= IFM_1000_SX;
   9740 		if ((reg & PCS_LSTS_FDX) != 0)
   9741 			mii->mii_media_active |= IFM_FDX;
   9742 		else
   9743 			mii->mii_media_active |= IFM_HDX;
   9744 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9745 			/* Check flow */
   9746 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9747 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9748 				DPRINTF(sc, WM_DEBUG_LINK,
   9749 				    ("XXX LINKOK but not ACOMP\n"));
   9750 				return;
   9751 			}
   9752 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9753 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9754 			DPRINTF(sc, WM_DEBUG_LINK,
   9755 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
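			/*
			 * Resolve the pause mode as in IEEE 802.3 Annex
			 * 28B: both sides symmetric -> TX and RX pause;
			 * local asymmetric-only with partner sym+asym ->
			 * TX pause; local sym+asym with partner
			 * asymmetric-only -> RX pause.
			 */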
   9756 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9757 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9758 				mii->mii_media_active |= IFM_FLOW
   9759 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9760 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9761 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9762 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9763 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9764 				mii->mii_media_active |= IFM_FLOW
   9765 				    | IFM_ETH_TXPAUSE;
   9766 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9767 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9768 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9769 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9770 				mii->mii_media_active |= IFM_FLOW
   9771 				    | IFM_ETH_RXPAUSE;
   9772 		}
   9773 		/* Update LED */
   9774 		wm_tbi_serdes_set_linkled(sc);
   9775 	} else
   9776 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9777 		    device_xname(sc->sc_dev)));
   9778 }
   9779 
   9780 /*
   9781  * wm_linkintr:
   9782  *
   9783  *	Helper; handle link interrupts.
   9784  */
   9785 static void
   9786 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9787 {
   9788 
   9789 	KASSERT(WM_CORE_LOCKED(sc));
   9790 
   9791 	if (sc->sc_flags & WM_F_HAS_MII)
   9792 		wm_linkintr_gmii(sc, icr);
   9793 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9794 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9795 		wm_linkintr_serdes(sc, icr);
   9796 	else
   9797 		wm_linkintr_tbi(sc, icr);
   9798 }
   9799 
   9800 
   9801 static inline void
   9802 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9803 {
   9804 
   9805 	if (wmq->wmq_txrx_use_workqueue)
   9806 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9807 	else
   9808 		softint_schedule(wmq->wmq_si);
   9809 }
   9810 
   9811 static inline void
   9812 wm_legacy_intr_disable(struct wm_softc *sc)
   9813 {
   9814 
   9815 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9816 }
   9817 
   9818 static inline void
   9819 wm_legacy_intr_enable(struct wm_softc *sc)
   9820 {
   9821 
   9822 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9823 }
   9824 
   9825 /*
   9826  * wm_intr_legacy:
   9827  *
   9828  *	Interrupt service routine for INTx and MSI.
   9829  */
   9830 static int
   9831 wm_intr_legacy(void *arg)
   9832 {
   9833 	struct wm_softc *sc = arg;
   9834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9835 	struct wm_queue *wmq = &sc->sc_queue[0];
   9836 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9837 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9838 	uint32_t icr, rndval = 0;
   9839 	int handled = 0;
   9840 	bool more = false;
   9841 
   9842 	while (1 /* CONSTCOND */) {
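		/* ICR is clear-on-read: reading it acknowledges the causes. */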
   9843 		icr = CSR_READ(sc, WMREG_ICR);
   9844 		if ((icr & sc->sc_icr) == 0)
   9845 			break;
   9846 		if (handled == 0)
   9847 			DPRINTF(sc, WM_DEBUG_TX,
   9848 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9849 		if (rndval == 0)
   9850 			rndval = icr;
   9851 
   9852 		mutex_enter(rxq->rxq_lock);
   9853 
   9854 		if (rxq->rxq_stopping) {
   9855 			mutex_exit(rxq->rxq_lock);
   9856 			break;
   9857 		}
   9858 
   9859 		handled = 1;
   9860 
   9861 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9862 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9863 			DPRINTF(sc, WM_DEBUG_RX,
   9864 			    ("%s: RX: got Rx intr 0x%08x\n",
   9865 				device_xname(sc->sc_dev),
   9866 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9867 			WM_Q_EVCNT_INCR(rxq, intr);
   9868 		}
   9869 #endif
    9870 		/*
    9871 		 * wm_rxeof() does *not* call upper layer functions directly,
    9872 		 * as if_percpuq_enqueue() just calls softint_schedule().
    9873 		 * So, we can call wm_rxeof() in interrupt context.
    9874 		 */
   9875 		more = wm_rxeof(rxq, UINT_MAX);
   9876 
   9877 		mutex_exit(rxq->rxq_lock);
   9878 		mutex_enter(txq->txq_lock);
   9879 
   9880 		if (txq->txq_stopping) {
   9881 			mutex_exit(txq->txq_lock);
   9882 			break;
   9883 		}
   9884 
   9885 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9886 		if (icr & ICR_TXDW) {
   9887 			DPRINTF(sc, WM_DEBUG_TX,
   9888 			    ("%s: TX: got TXDW interrupt\n",
   9889 				device_xname(sc->sc_dev)));
   9890 			WM_Q_EVCNT_INCR(txq, txdw);
   9891 		}
   9892 #endif
   9893 		more |= wm_txeof(txq, UINT_MAX);
   9894 		if (!IF_IS_EMPTY(&ifp->if_snd))
   9895 			more = true;
   9896 
   9897 		mutex_exit(txq->txq_lock);
   9898 		WM_CORE_LOCK(sc);
   9899 
   9900 		if (sc->sc_core_stopping) {
   9901 			WM_CORE_UNLOCK(sc);
   9902 			break;
   9903 		}
   9904 
   9905 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9906 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9907 			wm_linkintr(sc, icr);
   9908 		}
   9909 		if ((icr & ICR_GPI(0)) != 0)
   9910 			device_printf(sc->sc_dev, "got module interrupt\n");
   9911 
   9912 		WM_CORE_UNLOCK(sc);
   9913 
   9914 		if (icr & ICR_RXO) {
   9915 #if defined(WM_DEBUG)
   9916 			log(LOG_WARNING, "%s: Receive overrun\n",
   9917 			    device_xname(sc->sc_dev));
   9918 #endif /* defined(WM_DEBUG) */
   9919 		}
   9920 	}
   9921 
   9922 	rnd_add_uint32(&sc->rnd_source, rndval);
   9923 
   9924 	if (more) {
   9925 		/* Try to get more packets going. */
   9926 		wm_legacy_intr_disable(sc);
   9927 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9928 		wm_sched_handle_queue(sc, wmq);
   9929 	}
   9930 
   9931 	return handled;
   9932 }
   9933 
   9934 static inline void
   9935 wm_txrxintr_disable(struct wm_queue *wmq)
   9936 {
   9937 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9938 
   9939 	if (__predict_false(!wm_is_using_msix(sc))) {
   9940 		return wm_legacy_intr_disable(sc);
   9941 	}
   9942 
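	/*
	 * The mask register layout differs by chip: the 82574 uses
	 * per-queue bits in ICR/IMS/IMC, the 82575 uses EITR_{TX,RX}_QUEUE
	 * bits in EIMC/EIMS, and newer chips use one EIMC/EIMS bit per
	 * MSI-X vector.
	 */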
   9943 	if (sc->sc_type == WM_T_82574)
   9944 		CSR_WRITE(sc, WMREG_IMC,
   9945 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9946 	else if (sc->sc_type == WM_T_82575)
   9947 		CSR_WRITE(sc, WMREG_EIMC,
   9948 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9949 	else
   9950 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9951 }
   9952 
   9953 static inline void
   9954 wm_txrxintr_enable(struct wm_queue *wmq)
   9955 {
   9956 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9957 
   9958 	wm_itrs_calculate(sc, wmq);
   9959 
   9960 	if (__predict_false(!wm_is_using_msix(sc))) {
   9961 		return wm_legacy_intr_enable(sc);
   9962 	}
   9963 
    9964 	/*
    9965 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    9966 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    9967 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    9968 	 * disabled while each wm_handle_queue(wmq) is running.
    9969 	 */
   9970 	if (sc->sc_type == WM_T_82574)
   9971 		CSR_WRITE(sc, WMREG_IMS,
   9972 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9973 	else if (sc->sc_type == WM_T_82575)
   9974 		CSR_WRITE(sc, WMREG_EIMS,
   9975 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9976 	else
   9977 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9978 }
   9979 
   9980 static int
   9981 wm_txrxintr_msix(void *arg)
   9982 {
   9983 	struct wm_queue *wmq = arg;
   9984 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9985 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9986 	struct wm_softc *sc = txq->txq_sc;
   9987 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9988 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9989 	bool txmore;
   9990 	bool rxmore;
   9991 
   9992 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9993 
   9994 	DPRINTF(sc, WM_DEBUG_TX,
   9995 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9996 
   9997 	wm_txrxintr_disable(wmq);
   9998 
   9999 	mutex_enter(txq->txq_lock);
   10000 
   10001 	if (txq->txq_stopping) {
   10002 		mutex_exit(txq->txq_lock);
   10003 		return 0;
   10004 	}
   10005 
   10006 	WM_Q_EVCNT_INCR(txq, txdw);
   10007 	txmore = wm_txeof(txq, txlimit);
    10008 	/* wm_deferred_start() is done in wm_handle_queue(). */
   10009 	mutex_exit(txq->txq_lock);
   10010 
   10011 	DPRINTF(sc, WM_DEBUG_RX,
   10012 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10013 	mutex_enter(rxq->rxq_lock);
   10014 
   10015 	if (rxq->rxq_stopping) {
   10016 		mutex_exit(rxq->rxq_lock);
   10017 		return 0;
   10018 	}
   10019 
   10020 	WM_Q_EVCNT_INCR(rxq, intr);
   10021 	rxmore = wm_rxeof(rxq, rxlimit);
   10022 	mutex_exit(rxq->rxq_lock);
   10023 
   10024 	wm_itrs_writereg(sc, wmq);
   10025 
   10026 	if (txmore || rxmore) {
   10027 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10028 		wm_sched_handle_queue(sc, wmq);
   10029 	} else
   10030 		wm_txrxintr_enable(wmq);
   10031 
   10032 	return 1;
   10033 }
   10034 
   10035 static void
   10036 wm_handle_queue(void *arg)
   10037 {
   10038 	struct wm_queue *wmq = arg;
   10039 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10040 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10041 	struct wm_softc *sc = txq->txq_sc;
   10042 	u_int txlimit = sc->sc_tx_process_limit;
   10043 	u_int rxlimit = sc->sc_rx_process_limit;
   10044 	bool txmore;
   10045 	bool rxmore;
   10046 
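	/*
	 * This runs in softint or workqueue context, so use the process
	 * limits rather than the interrupt process limits used in
	 * wm_txrxintr_msix().
	 */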
   10047 	mutex_enter(txq->txq_lock);
   10048 	if (txq->txq_stopping) {
   10049 		mutex_exit(txq->txq_lock);
   10050 		return;
   10051 	}
   10052 	txmore = wm_txeof(txq, txlimit);
   10053 	wm_deferred_start_locked(txq);
   10054 	mutex_exit(txq->txq_lock);
   10055 
   10056 	mutex_enter(rxq->rxq_lock);
   10057 	if (rxq->rxq_stopping) {
   10058 		mutex_exit(rxq->rxq_lock);
   10059 		return;
   10060 	}
   10061 	WM_Q_EVCNT_INCR(rxq, defer);
   10062 	rxmore = wm_rxeof(rxq, rxlimit);
   10063 	mutex_exit(rxq->rxq_lock);
   10064 
   10065 	if (txmore || rxmore) {
   10066 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10067 		wm_sched_handle_queue(sc, wmq);
   10068 	} else
   10069 		wm_txrxintr_enable(wmq);
   10070 }
   10071 
   10072 static void
   10073 wm_handle_queue_work(struct work *wk, void *context)
   10074 {
   10075 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10076 
    10077 	/*
    10078 	 * An "enqueued" flag is not required here.
    10079 	 */
   10080 	wm_handle_queue(wmq);
   10081 }
   10082 
   10083 /*
   10084  * wm_linkintr_msix:
   10085  *
   10086  *	Interrupt service routine for link status change for MSI-X.
   10087  */
   10088 static int
   10089 wm_linkintr_msix(void *arg)
   10090 {
   10091 	struct wm_softc *sc = arg;
   10092 	uint32_t reg;
    10093 	bool has_rxo = false;
   10094 
   10095 	reg = CSR_READ(sc, WMREG_ICR);
   10096 	WM_CORE_LOCK(sc);
   10097 	DPRINTF(sc, WM_DEBUG_LINK,
   10098 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10099 		device_xname(sc->sc_dev), reg));
   10100 
   10101 	if (sc->sc_core_stopping)
   10102 		goto out;
   10103 
   10104 	if ((reg & ICR_LSC) != 0) {
   10105 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10106 		wm_linkintr(sc, ICR_LSC);
   10107 	}
   10108 	if ((reg & ICR_GPI(0)) != 0)
   10109 		device_printf(sc->sc_dev, "got module interrupt\n");
   10110 
    10111 	/*
    10112 	 * XXX 82574 MSI-X mode workaround
    10113 	 *
    10114 	 * 82574 MSI-X mode raises the receive overrun (RXO) interrupt on
    10115 	 * the ICR_OTHER MSI-X vector; it raises neither the ICR_RXQ(0) nor
    10116 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    10117 	 * interrupts by writing WMREG_ICS to process receive packets.
    10118 	 */
   10119 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10120 #if defined(WM_DEBUG)
   10121 		log(LOG_WARNING, "%s: Receive overrun\n",
   10122 		    device_xname(sc->sc_dev));
   10123 #endif /* defined(WM_DEBUG) */
   10124 
   10125 		has_rxo = true;
    10126 		/*
    10127 		 * The RXO interrupt fires at a very high rate when receive
    10128 		 * traffic is heavy. We use polling mode for ICR_OTHER, as
    10129 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    10130 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10131 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    10132 		 */
   10133 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10134 
   10135 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10136 	}
    10137 
   10140 out:
   10141 	WM_CORE_UNLOCK(sc);
   10142 
   10143 	if (sc->sc_type == WM_T_82574) {
   10144 		if (!has_rxo)
   10145 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10146 		else
   10147 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10148 	} else if (sc->sc_type == WM_T_82575)
   10149 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10150 	else
   10151 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10152 
   10153 	return 1;
   10154 }
   10155 
   10156 /*
   10157  * Media related.
   10158  * GMII, SGMII, TBI (and SERDES)
   10159  */
   10160 
   10161 /* Common */
   10162 
   10163 /*
   10164  * wm_tbi_serdes_set_linkled:
   10165  *
   10166  *	Update the link LED on TBI and SERDES devices.
   10167  */
   10168 static void
   10169 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10170 {
   10171 
   10172 	if (sc->sc_tbi_linkup)
   10173 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10174 	else
   10175 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10176 
   10177 	/* 82540 or newer devices are active low */
   10178 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10179 
   10180 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10181 }
   10182 
   10183 /* GMII related */
   10184 
   10185 /*
   10186  * wm_gmii_reset:
   10187  *
   10188  *	Reset the PHY.
   10189  */
   10190 static void
   10191 wm_gmii_reset(struct wm_softc *sc)
   10192 {
   10193 	uint32_t reg;
   10194 	int rv;
   10195 
   10196 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10197 		device_xname(sc->sc_dev), __func__));
   10198 
   10199 	rv = sc->phy.acquire(sc);
   10200 	if (rv != 0) {
   10201 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10202 		    __func__);
   10203 		return;
   10204 	}
   10205 
   10206 	switch (sc->sc_type) {
   10207 	case WM_T_82542_2_0:
   10208 	case WM_T_82542_2_1:
   10209 		/* null */
   10210 		break;
   10211 	case WM_T_82543:
   10212 		/*
   10213 		 * With 82543, we need to force speed and duplex on the MAC
   10214 		 * equal to what the PHY speed and duplex configuration is.
   10215 		 * In addition, we need to perform a hardware reset on the PHY
   10216 		 * to take it out of reset.
   10217 		 */
   10218 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10219 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10220 
   10221 		/* The PHY reset pin is active-low. */
   10222 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10223 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10224 		    CTRL_EXT_SWDPIN(4));
   10225 		reg |= CTRL_EXT_SWDPIO(4);
   10226 
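		/* Assert reset (drive the pin low), wait, then deassert it. */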
   10227 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10228 		CSR_WRITE_FLUSH(sc);
   10229 		delay(10*1000);
   10230 
   10231 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10232 		CSR_WRITE_FLUSH(sc);
   10233 		delay(150);
   10234 #if 0
   10235 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10236 #endif
   10237 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10238 		break;
   10239 	case WM_T_82544:	/* Reset 10000us */
   10240 	case WM_T_82540:
   10241 	case WM_T_82545:
   10242 	case WM_T_82545_3:
   10243 	case WM_T_82546:
   10244 	case WM_T_82546_3:
   10245 	case WM_T_82541:
   10246 	case WM_T_82541_2:
   10247 	case WM_T_82547:
   10248 	case WM_T_82547_2:
   10249 	case WM_T_82571:	/* Reset 100us */
   10250 	case WM_T_82572:
   10251 	case WM_T_82573:
   10252 	case WM_T_82574:
   10253 	case WM_T_82575:
   10254 	case WM_T_82576:
   10255 	case WM_T_82580:
   10256 	case WM_T_I350:
   10257 	case WM_T_I354:
   10258 	case WM_T_I210:
   10259 	case WM_T_I211:
   10260 	case WM_T_82583:
   10261 	case WM_T_80003:
   10262 		/* Generic reset */
   10263 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10264 		CSR_WRITE_FLUSH(sc);
   10265 		delay(20000);
   10266 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10267 		CSR_WRITE_FLUSH(sc);
   10268 		delay(20000);
   10269 
   10270 		if ((sc->sc_type == WM_T_82541)
   10271 		    || (sc->sc_type == WM_T_82541_2)
   10272 		    || (sc->sc_type == WM_T_82547)
   10273 		    || (sc->sc_type == WM_T_82547_2)) {
    10274 			/* Workarounds for IGP are done in igp_reset() */
   10275 			/* XXX add code to set LED after phy reset */
   10276 		}
   10277 		break;
   10278 	case WM_T_ICH8:
   10279 	case WM_T_ICH9:
   10280 	case WM_T_ICH10:
   10281 	case WM_T_PCH:
   10282 	case WM_T_PCH2:
   10283 	case WM_T_PCH_LPT:
   10284 	case WM_T_PCH_SPT:
   10285 	case WM_T_PCH_CNP:
   10286 		/* Generic reset */
   10287 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10288 		CSR_WRITE_FLUSH(sc);
   10289 		delay(100);
   10290 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10291 		CSR_WRITE_FLUSH(sc);
   10292 		delay(150);
   10293 		break;
   10294 	default:
   10295 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10296 		    __func__);
   10297 		break;
   10298 	}
   10299 
   10300 	sc->phy.release(sc);
   10301 
   10302 	/* get_cfg_done */
   10303 	wm_get_cfg_done(sc);
   10304 
   10305 	/* Extra setup */
   10306 	switch (sc->sc_type) {
   10307 	case WM_T_82542_2_0:
   10308 	case WM_T_82542_2_1:
   10309 	case WM_T_82543:
   10310 	case WM_T_82544:
   10311 	case WM_T_82540:
   10312 	case WM_T_82545:
   10313 	case WM_T_82545_3:
   10314 	case WM_T_82546:
   10315 	case WM_T_82546_3:
   10316 	case WM_T_82541_2:
   10317 	case WM_T_82547_2:
   10318 	case WM_T_82571:
   10319 	case WM_T_82572:
   10320 	case WM_T_82573:
   10321 	case WM_T_82574:
   10322 	case WM_T_82583:
   10323 	case WM_T_82575:
   10324 	case WM_T_82576:
   10325 	case WM_T_82580:
   10326 	case WM_T_I350:
   10327 	case WM_T_I354:
   10328 	case WM_T_I210:
   10329 	case WM_T_I211:
   10330 	case WM_T_80003:
   10331 		/* Null */
   10332 		break;
   10333 	case WM_T_82541:
   10334 	case WM_T_82547:
    10335 		/* XXX Configure the LED after PHY reset */
   10336 		break;
   10337 	case WM_T_ICH8:
   10338 	case WM_T_ICH9:
   10339 	case WM_T_ICH10:
   10340 	case WM_T_PCH:
   10341 	case WM_T_PCH2:
   10342 	case WM_T_PCH_LPT:
   10343 	case WM_T_PCH_SPT:
   10344 	case WM_T_PCH_CNP:
   10345 		wm_phy_post_reset(sc);
   10346 		break;
   10347 	default:
   10348 		panic("%s: unknown type\n", __func__);
   10349 		break;
   10350 	}
   10351 }
   10352 
    10353 /*
    10354  * Set up sc_phytype and mii_{read|write}reg.
    10355  *
    10356  *  To identify the PHY type, the correct read/write function must be
    10357  * selected. To select the correct read/write function, the PCI ID or
    10358  * MAC type is required, without accessing PHY registers.
    10359  *
    10360  *  On the first call of this function, the PHY ID is not known yet.
    10361  * Check the PCI ID or MAC type. The list of PCI IDs may not be
    10362  * perfect, so the result might be incorrect.
    10363  *
    10364  *  On the second call, the PHY OUI and model are used to identify the
    10365  * PHY type. It might not be perfect because some entries are missing
    10366  * from the comparison, but it would be better than the first call.
    10367  *
    10368  *  If the newly detected result differs from the previous assumption,
    10369  * a diagnostic message will be printed.
    10370  */
   10371 static void
   10372 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10373     uint16_t phy_model)
   10374 {
   10375 	device_t dev = sc->sc_dev;
   10376 	struct mii_data *mii = &sc->sc_mii;
   10377 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10378 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10379 	mii_readreg_t new_readreg;
   10380 	mii_writereg_t new_writereg;
   10381 	bool dodiag = true;
   10382 
   10383 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10384 		device_xname(sc->sc_dev), __func__));
   10385 
    10386 	/*
    10387 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10388 	 * always incorrect, so don't print diag output on the 2nd call.
    10389 	 */
   10390 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10391 		dodiag = false;
   10392 
   10393 	if (mii->mii_readreg == NULL) {
   10394 		/*
   10395 		 *  This is the first call of this function. For ICH and PCH
   10396 		 * variants, it's difficult to determine the PHY access method
   10397 		 * by sc_type, so use the PCI product ID for some devices.
   10398 		 */
   10399 
   10400 		switch (sc->sc_pcidevid) {
   10401 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10402 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10403 			/* 82577 */
   10404 			new_phytype = WMPHY_82577;
   10405 			break;
   10406 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10407 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10408 			/* 82578 */
   10409 			new_phytype = WMPHY_82578;
   10410 			break;
   10411 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10412 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10413 			/* 82579 */
   10414 			new_phytype = WMPHY_82579;
   10415 			break;
   10416 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10417 		case PCI_PRODUCT_INTEL_82801I_BM:
   10418 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10419 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10420 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10421 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10422 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10423 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10424 			/* ICH8, 9, 10 with 82567 */
   10425 			new_phytype = WMPHY_BM;
   10426 			break;
   10427 		default:
   10428 			break;
   10429 		}
   10430 	} else {
   10431 		/* It's not the first call. Use PHY OUI and model */
   10432 		switch (phy_oui) {
   10433 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10434 			switch (phy_model) {
   10435 			case 0x0004: /* XXX */
   10436 				new_phytype = WMPHY_82578;
   10437 				break;
   10438 			default:
   10439 				break;
   10440 			}
   10441 			break;
   10442 		case MII_OUI_xxMARVELL:
   10443 			switch (phy_model) {
   10444 			case MII_MODEL_xxMARVELL_I210:
   10445 				new_phytype = WMPHY_I210;
   10446 				break;
   10447 			case MII_MODEL_xxMARVELL_E1011:
   10448 			case MII_MODEL_xxMARVELL_E1000_3:
   10449 			case MII_MODEL_xxMARVELL_E1000_5:
   10450 			case MII_MODEL_xxMARVELL_E1112:
   10451 				new_phytype = WMPHY_M88;
   10452 				break;
   10453 			case MII_MODEL_xxMARVELL_E1149:
   10454 				new_phytype = WMPHY_BM;
   10455 				break;
   10456 			case MII_MODEL_xxMARVELL_E1111:
   10457 			case MII_MODEL_xxMARVELL_I347:
   10458 			case MII_MODEL_xxMARVELL_E1512:
   10459 			case MII_MODEL_xxMARVELL_E1340M:
   10460 			case MII_MODEL_xxMARVELL_E1543:
   10461 				new_phytype = WMPHY_M88;
   10462 				break;
   10463 			case MII_MODEL_xxMARVELL_I82563:
   10464 				new_phytype = WMPHY_GG82563;
   10465 				break;
   10466 			default:
   10467 				break;
   10468 			}
   10469 			break;
   10470 		case MII_OUI_INTEL:
   10471 			switch (phy_model) {
   10472 			case MII_MODEL_INTEL_I82577:
   10473 				new_phytype = WMPHY_82577;
   10474 				break;
   10475 			case MII_MODEL_INTEL_I82579:
   10476 				new_phytype = WMPHY_82579;
   10477 				break;
   10478 			case MII_MODEL_INTEL_I217:
   10479 				new_phytype = WMPHY_I217;
   10480 				break;
   10481 			case MII_MODEL_INTEL_I82580:
   10482 				new_phytype = WMPHY_82580;
   10483 				break;
   10484 			case MII_MODEL_INTEL_I350:
   10485 				new_phytype = WMPHY_I350;
   10486 				break;
   10488 			default:
   10489 				break;
   10490 			}
   10491 			break;
   10492 		case MII_OUI_yyINTEL:
   10493 			switch (phy_model) {
   10494 			case MII_MODEL_yyINTEL_I82562G:
   10495 			case MII_MODEL_yyINTEL_I82562EM:
   10496 			case MII_MODEL_yyINTEL_I82562ET:
   10497 				new_phytype = WMPHY_IFE;
   10498 				break;
   10499 			case MII_MODEL_yyINTEL_IGP01E1000:
   10500 				new_phytype = WMPHY_IGP;
   10501 				break;
   10502 			case MII_MODEL_yyINTEL_I82566:
   10503 				new_phytype = WMPHY_IGP_3;
   10504 				break;
   10505 			default:
   10506 				break;
   10507 			}
   10508 			break;
   10509 		default:
   10510 			break;
   10511 		}
   10512 
   10513 		if (dodiag) {
   10514 			if (new_phytype == WMPHY_UNKNOWN)
   10515 				aprint_verbose_dev(dev,
   10516 				    "%s: Unknown PHY model. OUI=%06x, "
   10517 				    "model=%04x\n", __func__, phy_oui,
   10518 				    phy_model);
   10519 
   10520 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10521 			    && (sc->sc_phytype != new_phytype)) {
   10522 				aprint_error_dev(dev, "Previously assumed PHY "
    10523 				    "type(%u) was incorrect. PHY type from PHY "
   10524 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10525 			}
   10526 		}
   10527 	}
   10528 
   10529 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10530 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10531 		/* SGMII */
   10532 		new_readreg = wm_sgmii_readreg;
   10533 		new_writereg = wm_sgmii_writereg;
   10534 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10535 		/* BM2 (phyaddr == 1) */
   10536 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10537 		    && (new_phytype != WMPHY_BM)
   10538 		    && (new_phytype != WMPHY_UNKNOWN))
   10539 			doubt_phytype = new_phytype;
   10540 		new_phytype = WMPHY_BM;
   10541 		new_readreg = wm_gmii_bm_readreg;
   10542 		new_writereg = wm_gmii_bm_writereg;
   10543 	} else if (sc->sc_type >= WM_T_PCH) {
   10544 		/* All PCH* use _hv_ */
   10545 		new_readreg = wm_gmii_hv_readreg;
   10546 		new_writereg = wm_gmii_hv_writereg;
   10547 	} else if (sc->sc_type >= WM_T_ICH8) {
   10548 		/* non-82567 ICH8, 9 and 10 */
   10549 		new_readreg = wm_gmii_i82544_readreg;
   10550 		new_writereg = wm_gmii_i82544_writereg;
   10551 	} else if (sc->sc_type >= WM_T_80003) {
   10552 		/* 80003 */
   10553 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10554 		    && (new_phytype != WMPHY_GG82563)
   10555 		    && (new_phytype != WMPHY_UNKNOWN))
   10556 			doubt_phytype = new_phytype;
   10557 		new_phytype = WMPHY_GG82563;
   10558 		new_readreg = wm_gmii_i80003_readreg;
   10559 		new_writereg = wm_gmii_i80003_writereg;
   10560 	} else if (sc->sc_type >= WM_T_I210) {
   10561 		/* I210 and I211 */
   10562 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10563 		    && (new_phytype != WMPHY_I210)
   10564 		    && (new_phytype != WMPHY_UNKNOWN))
   10565 			doubt_phytype = new_phytype;
   10566 		new_phytype = WMPHY_I210;
   10567 		new_readreg = wm_gmii_gs40g_readreg;
   10568 		new_writereg = wm_gmii_gs40g_writereg;
   10569 	} else if (sc->sc_type >= WM_T_82580) {
   10570 		/* 82580, I350 and I354 */
   10571 		new_readreg = wm_gmii_82580_readreg;
   10572 		new_writereg = wm_gmii_82580_writereg;
   10573 	} else if (sc->sc_type >= WM_T_82544) {
    10574 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10575 		new_readreg = wm_gmii_i82544_readreg;
   10576 		new_writereg = wm_gmii_i82544_writereg;
   10577 	} else {
   10578 		new_readreg = wm_gmii_i82543_readreg;
   10579 		new_writereg = wm_gmii_i82543_writereg;
   10580 	}
   10581 
   10582 	if (new_phytype == WMPHY_BM) {
   10583 		/* All BM use _bm_ */
   10584 		new_readreg = wm_gmii_bm_readreg;
   10585 		new_writereg = wm_gmii_bm_writereg;
   10586 	}
   10587 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10588 		/* All PCH* use _hv_ */
   10589 		new_readreg = wm_gmii_hv_readreg;
   10590 		new_writereg = wm_gmii_hv_writereg;
   10591 	}
   10592 
   10593 	/* Diag output */
   10594 	if (dodiag) {
   10595 		if (doubt_phytype != WMPHY_UNKNOWN)
   10596 			aprint_error_dev(dev, "Assumed new PHY type was "
   10597 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10598 			    new_phytype);
   10599 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10600 		    && (sc->sc_phytype != new_phytype))
   10601 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10602 			    " was incorrect. New PHY type = %u\n",
   10603 			    sc->sc_phytype, new_phytype);
   10604 
   10605 		if ((mii->mii_readreg != NULL) &&
   10606 		    (new_phytype == WMPHY_UNKNOWN))
   10607 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10608 
   10609 		if ((mii->mii_readreg != NULL) &&
   10610 		    (mii->mii_readreg != new_readreg))
   10611 			aprint_error_dev(dev, "Previously assumed PHY "
   10612 			    "read/write function was incorrect.\n");
   10613 	}
   10614 
   10615 	/* Update now */
   10616 	sc->sc_phytype = new_phytype;
   10617 	mii->mii_readreg = new_readreg;
   10618 	mii->mii_writereg = new_writereg;
   10619 	if (new_readreg == wm_gmii_hv_readreg) {
   10620 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10621 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10622 	} else if (new_readreg == wm_sgmii_readreg) {
   10623 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10624 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10625 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10626 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10627 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10628 	}
   10629 }
   10630 
   10631 /*
   10632  * wm_get_phy_id_82575:
   10633  *
   10634  * Return PHY ID. Return -1 if it failed.
   10635  */
   10636 static int
   10637 wm_get_phy_id_82575(struct wm_softc *sc)
   10638 {
   10639 	uint32_t reg;
   10640 	int phyid = -1;
   10641 
   10642 	/* XXX */
   10643 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10644 		return -1;
   10645 
   10646 	if (wm_sgmii_uses_mdio(sc)) {
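	/*
	 * The PHY address field lives in MDIC on the 82575/82576 and
	 * moved to MDICNFG on the 82580 and later.
	 */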
   10647 		switch (sc->sc_type) {
   10648 		case WM_T_82575:
   10649 		case WM_T_82576:
   10650 			reg = CSR_READ(sc, WMREG_MDIC);
   10651 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10652 			break;
   10653 		case WM_T_82580:
   10654 		case WM_T_I350:
   10655 		case WM_T_I354:
   10656 		case WM_T_I210:
   10657 		case WM_T_I211:
   10658 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10659 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10660 			break;
   10661 		default:
   10662 			return -1;
   10663 		}
   10664 	}
   10665 
   10666 	return phyid;
   10667 }
   10668 
   10669 /*
   10670  * wm_gmii_mediainit:
   10671  *
   10672  *	Initialize media for use on 1000BASE-T devices.
   10673  */
   10674 static void
   10675 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10676 {
   10677 	device_t dev = sc->sc_dev;
   10678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10679 	struct mii_data *mii = &sc->sc_mii;
   10680 
   10681 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10682 		device_xname(sc->sc_dev), __func__));
   10683 
   10684 	/* We have GMII. */
   10685 	sc->sc_flags |= WM_F_HAS_MII;
   10686 
   10687 	if (sc->sc_type == WM_T_80003)
    10688 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10689 	else
   10690 		sc->sc_tipg = TIPG_1000T_DFLT;
   10691 
   10692 	/*
   10693 	 * Let the chip set speed/duplex on its own based on
   10694 	 * signals from the PHY.
   10695 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10696 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10697 	 */
   10698 	sc->sc_ctrl |= CTRL_SLU;
   10699 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10700 
   10701 	/* Initialize our media structures and probe the GMII. */
   10702 	mii->mii_ifp = ifp;
   10703 
   10704 	mii->mii_statchg = wm_gmii_statchg;
   10705 
   10706 	/* get PHY control from SMBus to PCIe */
   10707 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10708 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10709 	    || (sc->sc_type == WM_T_PCH_CNP))
   10710 		wm_init_phy_workarounds_pchlan(sc);
   10711 
   10712 	wm_gmii_reset(sc);
   10713 
   10714 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10715 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10716 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10717 
   10718 	/* Setup internal SGMII PHY for SFP */
   10719 	wm_sgmii_sfp_preconfig(sc);
   10720 
   10721 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10722 	    || (sc->sc_type == WM_T_82580)
   10723 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10724 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10725 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10726 			/* Attach only one port */
   10727 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10728 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10729 		} else {
   10730 			int i, id;
   10731 			uint32_t ctrl_ext;
   10732 
   10733 			id = wm_get_phy_id_82575(sc);
   10734 			if (id != -1) {
   10735 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10736 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10737 			}
   10738 			if ((id == -1)
   10739 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10740 				/* Power on sgmii phy if it is disabled */
   10741 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10742 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10743 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10744 				CSR_WRITE_FLUSH(sc);
   10745 				delay(300*1000); /* XXX too long */
   10746 
    10747 				/*
    10748 				 * Scan PHY addresses from 1 to 7.
    10749 				 *
    10750 				 * I2C access fails with the I2C register's
    10751 				 * ERROR bit set, so suppress the error
    10752 				 * message while scanning.
    10753 				 */
   10754 				sc->phy.no_errprint = true;
   10755 				for (i = 1; i < 8; i++)
   10756 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10757 					    0xffffffff, i, MII_OFFSET_ANY,
   10758 					    MIIF_DOPAUSE);
   10759 				sc->phy.no_errprint = false;
   10760 
   10761 				/* Restore previous sfp cage power state */
   10762 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10763 			}
   10764 		}
   10765 	} else
   10766 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10767 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10768 
   10769 	/*
   10770 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10771 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10772 	 */
   10773 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10774 		|| (sc->sc_type == WM_T_PCH_SPT)
   10775 		|| (sc->sc_type == WM_T_PCH_CNP))
   10776 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10777 		wm_set_mdio_slow_mode_hv(sc);
   10778 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10779 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10780 	}
   10781 
   10782 	/*
   10783 	 * (For ICH8 variants)
   10784 	 * If PHY detection failed, use BM's r/w function and retry.
   10785 	 */
   10786 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10787 		/* if failed, retry with *_bm_* */
   10788 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10789 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10790 		    sc->sc_phytype);
   10791 		sc->sc_phytype = WMPHY_BM;
   10792 		mii->mii_readreg = wm_gmii_bm_readreg;
   10793 		mii->mii_writereg = wm_gmii_bm_writereg;
   10794 
   10795 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10796 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10797 	}
   10798 
   10799 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10800 		/* No PHY was found */
   10801 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10802 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10803 		sc->sc_phytype = WMPHY_NONE;
   10804 	} else {
   10805 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10806 
    10807 		/*
    10808 		 * A PHY was found. Check the PHY type again with the
    10809 		 * second call of wm_gmii_setup_phytype().
    10810 		 */
   10811 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10812 		    child->mii_mpd_model);
   10813 
   10814 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10815 	}
   10816 }
   10817 
   10818 /*
   10819  * wm_gmii_mediachange:	[ifmedia interface function]
   10820  *
   10821  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10822  */
   10823 static int
   10824 wm_gmii_mediachange(struct ifnet *ifp)
   10825 {
   10826 	struct wm_softc *sc = ifp->if_softc;
   10827 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10828 	uint32_t reg;
   10829 	int rc;
   10830 
   10831 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10832 		device_xname(sc->sc_dev), __func__));
   10833 	if ((ifp->if_flags & IFF_UP) == 0)
   10834 		return 0;
   10835 
   10836 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10837 	if ((sc->sc_type == WM_T_82580)
   10838 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10839 	    || (sc->sc_type == WM_T_I211)) {
   10840 		reg = CSR_READ(sc, WMREG_PHPM);
   10841 		reg &= ~PHPM_GO_LINK_D;
   10842 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10843 	}
   10844 
   10845 	/* Disable D0 LPLU. */
   10846 	wm_lplu_d0_disable(sc);
   10847 
   10848 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10849 	sc->sc_ctrl |= CTRL_SLU;
   10850 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10851 	    || (sc->sc_type > WM_T_82543)) {
   10852 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10853 	} else {
   10854 		sc->sc_ctrl &= ~CTRL_ASDE;
   10855 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10856 		if (ife->ifm_media & IFM_FDX)
   10857 			sc->sc_ctrl |= CTRL_FD;
   10858 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10859 		case IFM_10_T:
   10860 			sc->sc_ctrl |= CTRL_SPEED_10;
   10861 			break;
   10862 		case IFM_100_TX:
   10863 			sc->sc_ctrl |= CTRL_SPEED_100;
   10864 			break;
   10865 		case IFM_1000_T:
   10866 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10867 			break;
   10868 		case IFM_NONE:
   10869 			/* There is no specific setting for IFM_NONE */
   10870 			break;
   10871 		default:
   10872 			panic("wm_gmii_mediachange: bad media 0x%x",
   10873 			    ife->ifm_media);
   10874 		}
   10875 	}
   10876 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10877 	CSR_WRITE_FLUSH(sc);
   10878 
   10879 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10880 		wm_serdes_mediachange(ifp);
   10881 
   10882 	if (sc->sc_type <= WM_T_82543)
   10883 		wm_gmii_reset(sc);
   10884 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10885 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10886 		/* Allow time for the SFP cage to power up the PHY */
   10887 		delay(300 * 1000);
   10888 		wm_gmii_reset(sc);
   10889 	}
   10890 
   10891 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10892 		return 0;
   10893 	return rc;
   10894 }
   10895 
   10896 /*
   10897  * wm_gmii_mediastatus:	[ifmedia interface function]
   10898  *
   10899  *	Get the current interface media status on a 1000BASE-T device.
   10900  */
   10901 static void
   10902 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10903 {
   10904 	struct wm_softc *sc = ifp->if_softc;
   10905 
   10906 	ether_mediastatus(ifp, ifmr);
   10907 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10908 	    | sc->sc_flowflags;
   10909 }
   10910 
   10911 #define	MDI_IO		CTRL_SWDPIN(2)
   10912 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10913 #define	MDI_CLK		CTRL_SWDPIN(3)
   10914 
   10915 static void
   10916 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10917 {
   10918 	uint32_t i, v;
   10919 
   10920 	v = CSR_READ(sc, WMREG_CTRL);
   10921 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10922 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10923 
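	/*
	 * Bit-bang the value out MSB first: present each bit on MDI_IO,
	 * then pulse MDI_CLK high and low with 10us holds.
	 */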
   10924 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10925 		if (data & i)
   10926 			v |= MDI_IO;
   10927 		else
   10928 			v &= ~MDI_IO;
   10929 		CSR_WRITE(sc, WMREG_CTRL, v);
   10930 		CSR_WRITE_FLUSH(sc);
   10931 		delay(10);
   10932 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10933 		CSR_WRITE_FLUSH(sc);
   10934 		delay(10);
   10935 		CSR_WRITE(sc, WMREG_CTRL, v);
   10936 		CSR_WRITE_FLUSH(sc);
   10937 		delay(10);
   10938 	}
   10939 }
   10940 
   10941 static uint16_t
   10942 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10943 {
   10944 	uint32_t v, i;
   10945 	uint16_t data = 0;
   10946 
   10947 	v = CSR_READ(sc, WMREG_CTRL);
   10948 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10949 	v |= CTRL_SWDPIO(3);
   10950 
   10951 	CSR_WRITE(sc, WMREG_CTRL, v);
   10952 	CSR_WRITE_FLUSH(sc);
   10953 	delay(10);
   10954 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10955 	CSR_WRITE_FLUSH(sc);
   10956 	delay(10);
   10957 	CSR_WRITE(sc, WMREG_CTRL, v);
   10958 	CSR_WRITE_FLUSH(sc);
   10959 	delay(10);
   10960 
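	/* Shift in 16 bits, MSB first, sampling MDI_IO while MDI_CLK is high. */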
   10961 	for (i = 0; i < 16; i++) {
   10962 		data <<= 1;
   10963 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10964 		CSR_WRITE_FLUSH(sc);
   10965 		delay(10);
   10966 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10967 			data |= 1;
   10968 		CSR_WRITE(sc, WMREG_CTRL, v);
   10969 		CSR_WRITE_FLUSH(sc);
   10970 		delay(10);
   10971 	}
   10972 
   10973 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10974 	CSR_WRITE_FLUSH(sc);
   10975 	delay(10);
   10976 	CSR_WRITE(sc, WMREG_CTRL, v);
   10977 	CSR_WRITE_FLUSH(sc);
   10978 	delay(10);
   10979 
   10980 	return data;
   10981 }
   10982 
   10983 #undef MDI_IO
   10984 #undef MDI_DIR
   10985 #undef MDI_CLK
   10986 
   10987 /*
   10988  * wm_gmii_i82543_readreg:	[mii interface function]
   10989  *
   10990  *	Read a PHY register on the GMII (i82543 version).
   10991  */
   10992 static int
   10993 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10994 {
   10995 	struct wm_softc *sc = device_private(dev);
   10996 
   10997 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10998 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10999 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11000 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11001 
   11002 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11003 		device_xname(dev), phy, reg, *val));
   11004 
   11005 	return 0;
   11006 }
   11007 
   11008 /*
   11009  * wm_gmii_i82543_writereg:	[mii interface function]
   11010  *
   11011  *	Write a PHY register on the GMII (i82543 version).
   11012  */
   11013 static int
   11014 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11015 {
   11016 	struct wm_softc *sc = device_private(dev);
   11017 
   11018 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11019 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11020 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11021 	    (MII_COMMAND_START << 30), 32);
   11022 
   11023 	return 0;
   11024 }
   11025 
   11026 /*
   11027  * wm_gmii_mdic_readreg:	[mii interface function]
   11028  *
   11029  *	Read a PHY register on the GMII.
   11030  */
   11031 static int
   11032 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11033 {
   11034 	struct wm_softc *sc = device_private(dev);
   11035 	uint32_t mdic = 0;
   11036 	int i;
   11037 
   11038 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11039 	    && (reg > MII_ADDRMASK)) {
   11040 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11041 		    __func__, sc->sc_phytype, reg);
   11042 		reg &= MII_ADDRMASK;
   11043 	}
   11044 
   11045 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11046 	    MDIC_REGADD(reg));
   11047 
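	/* Poll for transaction completion (MDIC_READY) at 50us intervals. */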
   11048 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11049 		delay(50);
   11050 		mdic = CSR_READ(sc, WMREG_MDIC);
   11051 		if (mdic & MDIC_READY)
   11052 			break;
   11053 	}
   11054 
   11055 	if ((mdic & MDIC_READY) == 0) {
   11056 		DPRINTF(sc, WM_DEBUG_GMII,
   11057 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11058 			device_xname(dev), phy, reg));
   11059 		return ETIMEDOUT;
   11060 	} else if (mdic & MDIC_E) {
   11061 		/* This is normal if no PHY is present. */
   11062 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11063 			device_xname(sc->sc_dev), phy, reg));
   11064 		return -1;
   11065 	} else
   11066 		*val = MDIC_DATA(mdic);
   11067 
   11068 	/*
   11069 	 * Allow some time after each MDIC transaction to avoid
   11070 	 * reading duplicate data in the next MDIC transaction.
   11071 	 */
   11072 	if (sc->sc_type == WM_T_PCH2)
   11073 		delay(100);
   11074 
   11075 	return 0;
   11076 }
   11077 
   11078 /*
   11079  * wm_gmii_mdic_writereg:	[mii interface function]
   11080  *
   11081  *	Write a PHY register on the GMII.
   11082  */
   11083 static int
   11084 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11085 {
   11086 	struct wm_softc *sc = device_private(dev);
   11087 	uint32_t mdic = 0;
   11088 	int i;
   11089 
   11090 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11091 	    && (reg > MII_ADDRMASK)) {
   11092 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11093 		    __func__, sc->sc_phytype, reg);
   11094 		reg &= MII_ADDRMASK;
   11095 	}
   11096 
   11097 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11098 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11099 
   11100 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11101 		delay(50);
   11102 		mdic = CSR_READ(sc, WMREG_MDIC);
   11103 		if (mdic & MDIC_READY)
   11104 			break;
   11105 	}
   11106 
   11107 	if ((mdic & MDIC_READY) == 0) {
   11108 		DPRINTF(sc, WM_DEBUG_GMII,
   11109 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11110 			device_xname(dev), phy, reg));
   11111 		return ETIMEDOUT;
   11112 	} else if (mdic & MDIC_E) {
   11113 		DPRINTF(sc, WM_DEBUG_GMII,
   11114 		    ("%s: MDIC write error: phy %d reg %d\n",
   11115 			device_xname(dev), phy, reg));
   11116 		return -1;
   11117 	}
   11118 
   11119 	/*
   11120 	 * Allow some time after each MDIC transaction to avoid
   11121 	 * reading duplicate data in the next MDIC transaction.
   11122 	 */
   11123 	if (sc->sc_type == WM_T_PCH2)
   11124 		delay(100);
   11125 
   11126 	return 0;
   11127 }
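
/*
 * MDIC packs a whole clause 22 transaction into a single 32-bit register,
 * which is why its REGADD field is only five bits wide and registers above
 * MII_ADDRMASK (0x1f) need the page-select indirection used by the wrappers
 * below.  Assumed field layout (the usual one for Intel gigabit MACs;
 * verify against the MDIC_* definitions in if_wmreg.h):
 *
 *	bits  0-15	data
 *	bits 16-20	REGADD (register 0x00..0x1f only)
 *	bits 21-25	PHYADD
 *	bits 26-27	opcode (read/write)
 *	bit  28		READY
 *	bit  29		interrupt enable
 *	bit  30		error (MDIC_E)
 */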
   11128 
   11129 /*
   11130  * wm_gmii_i82544_readreg:	[mii interface function]
   11131  *
   11132  *	Read a PHY register on the GMII.
   11133  */
   11134 static int
   11135 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11136 {
   11137 	struct wm_softc *sc = device_private(dev);
   11138 	int rv;
   11139 
   11140 	if (sc->phy.acquire(sc)) {
   11141 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11142 		return -1;
   11143 	}
   11144 
   11145 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11146 
   11147 	sc->phy.release(sc);
   11148 
   11149 	return rv;
   11150 }
   11151 
   11152 static int
   11153 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11154 {
   11155 	struct wm_softc *sc = device_private(dev);
   11156 	int rv;
   11157 
   11158 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11159 		switch (sc->sc_phytype) {
   11160 		case WMPHY_IGP:
   11161 		case WMPHY_IGP_2:
   11162 		case WMPHY_IGP_3:
   11163 			rv = wm_gmii_mdic_writereg(dev, phy,
   11164 			    IGPHY_PAGE_SELECT, reg);
   11165 			if (rv != 0)
   11166 				return rv;
   11167 			break;
   11168 		default:
   11169 #ifdef WM_DEBUG
   11170 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11171 			    __func__, sc->sc_phytype, reg);
   11172 #endif
   11173 			break;
   11174 		}
   11175 	}
   11176 
   11177 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11178 }
   11179 
   11180 /*
   11181  * wm_gmii_i82544_writereg:	[mii interface function]
   11182  *
   11183  *	Write a PHY register on the GMII.
   11184  */
   11185 static int
   11186 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11187 {
   11188 	struct wm_softc *sc = device_private(dev);
   11189 	int rv;
   11190 
   11191 	if (sc->phy.acquire(sc)) {
   11192 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11193 		return -1;
   11194 	}
   11195 
   11196 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   11197 	sc->phy.release(sc);
   11198 
   11199 	return rv;
   11200 }
   11201 
   11202 static int
   11203 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11204 {
   11205 	struct wm_softc *sc = device_private(dev);
   11206 	int rv;
   11207 
   11208 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11209 		switch (sc->sc_phytype) {
   11210 		case WMPHY_IGP:
   11211 		case WMPHY_IGP_2:
   11212 		case WMPHY_IGP_3:
   11213 			rv = wm_gmii_mdic_writereg(dev, phy,
   11214 			    IGPHY_PAGE_SELECT, reg);
   11215 			if (rv != 0)
   11216 				return rv;
   11217 			break;
   11218 		default:
   11219 #ifdef WM_DEBUG
   11220 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11221 			    __func__, sc->sc_phytype, reg);
   11222 #endif
   11223 			break;
   11224 		}
   11225 	}
   11226 
   11227 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11228 }
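
/*
 * For IGP-family PHYs the register argument carries a page number in its
 * upper bits; the locked helpers above first write the full value to
 * IGPHY_PAGE_SELECT and then issue the MDIC access with only the low five
 * bits.  Hedged usage sketch (not compiled; EXAMPLE_PAGED_REG is a made-up
 * name for any register value with page bits set):
 */
#if 0
	uint16_t data;

	if (sc->phy.acquire(sc) == 0) {
		wm_gmii_i82544_readreg_locked(dev, phy, EXAMPLE_PAGED_REG,
		    &data);
		sc->phy.release(sc);
	}
#endif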
   11229 
   11230 /*
   11231  * wm_gmii_i80003_readreg:	[mii interface function]
   11232  *
   11233  *	Read a PHY register on the kumeran
   11234  * This could be handled by the PHY layer if we didn't have to lock the
   11235  * resource ...
   11236  */
   11237 static int
   11238 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11239 {
   11240 	struct wm_softc *sc = device_private(dev);
   11241 	int page_select;
   11242 	uint16_t temp, temp2;
   11243 	int rv = 0;
   11244 
   11245 	if (phy != 1) /* Only one PHY on kumeran bus */
   11246 		return -1;
   11247 
   11248 	if (sc->phy.acquire(sc)) {
   11249 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11250 		return -1;
   11251 	}
   11252 
   11253 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11254 		page_select = GG82563_PHY_PAGE_SELECT;
   11255 	else {
   11256 		/*
   11257 		 * Use Alternative Page Select register to access registers
   11258 		 * 30 and 31.
   11259 		 */
   11260 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11261 	}
   11262 	temp = reg >> GG82563_PAGE_SHIFT;
   11263 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11264 		goto out;
   11265 
   11266 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11267 		/*
   11268 		 * Wait an extra 200us to work around an erratum in the
   11269 		 * MDIC register's ready bit.
   11270 		 */
   11271 		delay(200);
   11272 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11273 		if ((rv != 0) || (temp2 != temp)) {
   11274 			device_printf(dev, "%s failed\n", __func__);
   11275 			rv = -1;
   11276 			goto out;
   11277 		}
   11278 		delay(200);
   11279 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11280 		delay(200);
   11281 	} else
   11282 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11283 
   11284 out:
   11285 	sc->phy.release(sc);
   11286 	return rv;
   11287 }
   11288 
   11289 /*
   11290  * wm_gmii_i80003_writereg:	[mii interface function]
   11291  *
   11292  *	Write a PHY register on the kumeran.
   11293  * This could be handled by the PHY layer if we didn't have to lock the
   11294  * resource ...
   11295  */
   11296 static int
   11297 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11298 {
   11299 	struct wm_softc *sc = device_private(dev);
   11300 	int page_select, rv;
   11301 	uint16_t temp, temp2;
   11302 
   11303 	if (phy != 1) /* Only one PHY on kumeran bus */
   11304 		return -1;
   11305 
   11306 	if (sc->phy.acquire(sc)) {
   11307 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11308 		return -1;
   11309 	}
   11310 
   11311 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11312 		page_select = GG82563_PHY_PAGE_SELECT;
   11313 	else {
   11314 		/*
   11315 		 * Use Alternative Page Select register to access registers
   11316 		 * 30 and 31.
   11317 		 */
   11318 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11319 	}
   11320 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11321 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11322 		goto out;
   11323 
   11324 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11325 		/*
   11326 		 * Wait an extra 200us to work around an erratum in the
   11327 		 * MDIC register's ready bit.
   11328 		 */
   11329 		delay(200);
   11330 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11331 		if ((rv != 0) || (temp2 != temp)) {
   11332 			device_printf(dev, "%s failed\n", __func__);
   11333 			rv = -1;
   11334 			goto out;
   11335 		}
   11336 		delay(200);
   11337 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11338 		delay(200);
   11339 	} else
   11340 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11341 
   11342 out:
   11343 	sc->phy.release(sc);
   11344 	return rv;
   11345 }
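
/*
 * Worked example of the GG82563 paging used above, assuming the usual
 * Intel-derived encoding reg = (page << GG82563_PAGE_SHIFT) | regnum with
 * GG82563_PAGE_SHIFT == 5: for page 193, register 16 (the KMRN mode
 * control register in the e1000 headers), the page-select write sends
 * reg >> 5 == 193 and the data access uses reg & MII_ADDRMASK == 16.
 * Registers 30 and 31 of any page are reached through
 * GG82563_PHY_PAGE_SELECT_ALT instead (see GG82563_MIN_ALT_REG).
 */
#if 0
	/* page 193, register 16, assuming the e1000-style encoding */
	reg = (193 << GG82563_PAGE_SHIFT) | 16;
#endif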
   11346 
   11347 /*
   11348  * wm_gmii_bm_readreg:	[mii interface function]
   11349  *
   11350  *	Read a PHY register on the BM PHY.
   11351  * This could be handled by the PHY layer if we didn't have to lock the
   11352  * resource ...
   11353  */
   11354 static int
   11355 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11356 {
   11357 	struct wm_softc *sc = device_private(dev);
   11358 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11359 	int rv;
   11360 
   11361 	if (sc->phy.acquire(sc)) {
   11362 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11363 		return -1;
   11364 	}
   11365 
   11366 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11367 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11368 		    || (reg == 31)) ? 1 : phy;
   11369 	/* Page 800 works differently than the rest so it has its own func */
   11370 	if (page == BM_WUC_PAGE) {
   11371 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11372 		goto release;
   11373 	}
   11374 
   11375 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11376 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11377 		    && (sc->sc_type != WM_T_82583))
   11378 			rv = wm_gmii_mdic_writereg(dev, phy,
   11379 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11380 		else
   11381 			rv = wm_gmii_mdic_writereg(dev, phy,
   11382 			    BME1000_PHY_PAGE_SELECT, page);
   11383 		if (rv != 0)
   11384 			goto release;
   11385 	}
   11386 
   11387 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11388 
   11389 release:
   11390 	sc->phy.release(sc);
   11391 	return rv;
   11392 }
   11393 
   11394 /*
   11395  * wm_gmii_bm_writereg:	[mii interface function]
   11396  *
   11397  *	Write a PHY register on the BM PHY.
   11398  * This could be handled by the PHY layer if we didn't have to lock the
   11399  * resource ...
   11400  */
   11401 static int
   11402 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11403 {
   11404 	struct wm_softc *sc = device_private(dev);
   11405 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11406 	int rv;
   11407 
   11408 	if (sc->phy.acquire(sc)) {
   11409 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11410 		return -1;
   11411 	}
   11412 
   11413 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11414 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11415 		    || (reg == 31)) ? 1 : phy;
   11416 	/* Page 800 works differently than the rest so it has its own func */
   11417 	if (page == BM_WUC_PAGE) {
   11418 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11419 		goto release;
   11420 	}
   11421 
   11422 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11423 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11424 		    && (sc->sc_type != WM_T_82583))
   11425 			rv = wm_gmii_mdic_writereg(dev, phy,
   11426 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11427 		else
   11428 			rv = wm_gmii_mdic_writereg(dev, phy,
   11429 			    BME1000_PHY_PAGE_SELECT, page);
   11430 		if (rv != 0)
   11431 			goto release;
   11432 	}
   11433 
   11434 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11435 
   11436 release:
   11437 	sc->phy.release(sc);
   11438 	return rv;
   11439 }
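
/*
 * For the BM functions above, "reg" is likewise a combined value:
 * page = reg >> BME1000_PAGE_SHIFT, register number in the low bits.
 * Hedged usage sketch (not compiled; the shift-based encoding is an
 * assumption carried over from the Intel-derived headers, and page 769
 * register 17 is the WUC enable register used by the wakeup helpers
 * below):
 */
#if 0
	uint16_t data;

	/* read register 17 (BM_WUC_ENABLE_REG) on page 769 */
	wm_gmii_bm_readreg(dev, 1,
	    (BM_PORT_CTRL_PAGE << BME1000_PAGE_SHIFT) | 17, &data);
#endif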
   11440 
   11441 /*
   11442  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11443  *  @dev: pointer to the HW structure
   11444  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11445  *
   11446  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11447  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11448  */
   11449 static int
   11450 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11451 {
   11452 #ifdef WM_DEBUG
   11453 	struct wm_softc *sc = device_private(dev);
   11454 #endif
   11455 	uint16_t temp;
   11456 	int rv;
   11457 
   11458 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11459 		device_xname(dev), __func__));
   11460 
   11461 	if (!phy_regp)
   11462 		return -1;
   11463 
   11464 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11465 
   11466 	/* Select Port Control Registers page */
   11467 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11468 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11469 	if (rv != 0)
   11470 		return rv;
   11471 
   11472 	/* Read WUCE and save it */
   11473 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11474 	if (rv != 0)
   11475 		return rv;
   11476 
   11477 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11478 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11479 	 */
   11480 	temp = *phy_regp;
   11481 	temp |= BM_WUC_ENABLE_BIT;
   11482 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11483 
   11484 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11485 		return rv;
   11486 
   11487 	/* Select Host Wakeup Registers page - caller now able to write
   11488 	 * registers on the Wakeup registers page
   11489 	 */
   11490 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11491 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11492 }
   11493 
   11494 /*
   11495  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11496  *  @dev: pointer to the HW structure
   11497  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11498  *
   11499  *  Restore BM_WUC_ENABLE_REG to its original value.
   11500  *
   11501  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11502  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11503  *  caller.
   11504  */
   11505 static int
   11506 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11507 {
   11508 #ifdef WM_DEBUG
   11509 	struct wm_softc *sc = device_private(dev);
   11510 #endif
   11511 
   11512 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11513 		device_xname(dev), __func__));
   11514 
   11515 	if (!phy_regp)
   11516 		return -1;
   11517 
   11518 	/* Select Port Control Registers page */
   11519 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11520 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11521 
   11522 	/* Restore 769.17 to its original value */
   11523 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11524 
   11525 	return 0;
   11526 }
   11527 
   11528 /*
   11529  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11530  *  @dev: pointer to the HW structure
   11531  *  @offset: register offset to be read or written
   11532  *  @val: pointer to the data to read or write
   11533  *  @rd: determines if operation is read or write
   11534  *  @page_set: BM_WUC_PAGE already set and access enabled
   11535  *
   11536  *  Read the PHY register at offset and store the result in *val, or
   11537  *  write *val to the PHY register at offset.  Note that the procedure
   11538  *  for accessing the PHY wakeup registers differs from that for the
   11539  *  other PHY registers.  It works as follows:
   11540  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11541  *  2) Set the page to 800 for host access (801 for manageability access)
   11542  *  3) Write the address using the address opcode (0x11)
   11543  *  4) Read or write the data using the data opcode (0x12)
   11544  *  5) Restore 769.17.2 to its original value
   11545  *
   11546  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11547  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11548  *
   11549  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11550  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11551  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11552  */
   11553 static int
   11554 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11555 	bool page_set)
   11556 {
   11557 	struct wm_softc *sc = device_private(dev);
   11558 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11559 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11560 	uint16_t wuce;
   11561 	int rv = 0;
   11562 
   11563 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11564 		device_xname(dev), __func__));
   11565 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11566 	if ((sc->sc_type == WM_T_PCH)
   11567 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11568 		device_printf(dev,
   11569 		    "Attempting to access page %d while gig enabled.\n", page);
   11570 	}
   11571 
   11572 	if (!page_set) {
   11573 		/* Enable access to PHY wakeup registers */
   11574 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11575 		if (rv != 0) {
   11576 			device_printf(dev,
   11577 			    "%s: Could not enable PHY wakeup reg access\n",
   11578 			    __func__);
   11579 			return rv;
   11580 		}
   11581 	}
   11582 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11583 		device_xname(sc->sc_dev), __func__, page, regnum));
   11584 
   11585 	/*
   11586 	 * Steps 3 and 4: access the PHY wakeup register.
   11587 	 * See the procedure in the function comment above.
   11588 	 */
   11589 
   11590 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11591 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11592 	if (rv != 0)
   11593 		return rv;
   11594 
   11595 	if (rd) {
   11596 		/* Read the Wakeup register page value using opcode 0x12 */
   11597 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11598 	} else {
   11599 		/* Write the Wakeup register page value using opcode 0x12 */
   11600 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11601 	}
   11602 	if (rv != 0)
   11603 		return rv;
   11604 
   11605 	if (!page_set)
   11606 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11607 
   11608 	return rv;
   11609 }
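
/*
 * When page_set is true, an outer caller drives steps 1, 2 and 5 of the
 * procedure documented above itself, amortizing the page switches over
 * several wakeup-register accesses.  Sketch of that outer pattern (not
 * compiled; reg1 and reg2 stand for arbitrary wakeup registers):
 */
#if 0
	uint16_t wuce, data;

	/* steps 1-2: save WUCE, enable wakeup access, select page 800 */
	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);

	/* steps 3-4, repeated as needed while the page stays set */
	wm_access_phy_wakeup_reg_bm(dev, reg1, &data, true, true);
	wm_access_phy_wakeup_reg_bm(dev, reg2, &data, false, true);

	/* step 5: restore WUCE */
	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
#endif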
   11610 
   11611 /*
   11612  * wm_gmii_hv_readreg:	[mii interface function]
   11613  *
   11614  *	Read a PHY register on the HV PHY (82577 and newer).
   11615  * This could be handled by the PHY layer if we didn't have to lock the
   11616  * resource ...
   11617  */
   11618 static int
   11619 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11620 {
   11621 	struct wm_softc *sc = device_private(dev);
   11622 	int rv;
   11623 
   11624 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11625 		device_xname(dev), __func__));
   11626 	if (sc->phy.acquire(sc)) {
   11627 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11628 		return -1;
   11629 	}
   11630 
   11631 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11632 	sc->phy.release(sc);
   11633 	return rv;
   11634 }
   11635 
   11636 static int
   11637 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11638 {
   11639 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11640 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11641 	int rv;
   11642 
   11643 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11644 
   11645 	/* Page 800 works differently than the rest so it has its own func */
   11646 	if (page == BM_WUC_PAGE)
   11647 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11648 
   11649 	/*
   11650 	 * Pages between 1 and 767 work differently from the rest and are
   11651 	 * not handled here.
   11652 	 */
   11653 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11654 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11655 		return -1;
   11656 	}
   11657 
   11658 	/*
   11659 	 * XXX I21[789] documents say that the SMBus Address register is at
   11660 	 * PHY address 01, Page 0 (not 768), Register 26.
   11661 	 */
   11662 	if (page == HV_INTC_FC_PAGE_START)
   11663 		page = 0;
   11664 
   11665 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11666 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11667 		    page << BME1000_PAGE_SHIFT);
   11668 		if (rv != 0)
   11669 			return rv;
   11670 	}
   11671 
   11672 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11673 }
   11674 
   11675 /*
   11676  * wm_gmii_hv_writereg:	[mii interface function]
   11677  *
   11678  *	Write a PHY register on the HV PHY (82577 and newer).
   11679  * This could be handled by the PHY layer if we didn't have to lock the
   11680  * resource ...
   11681  */
   11682 static int
   11683 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11684 {
   11685 	struct wm_softc *sc = device_private(dev);
   11686 	int rv;
   11687 
   11688 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11689 		device_xname(dev), __func__));
   11690 
   11691 	if (sc->phy.acquire(sc)) {
   11692 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11693 		return -1;
   11694 	}
   11695 
   11696 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11697 	sc->phy.release(sc);
   11698 
   11699 	return rv;
   11700 }
   11701 
   11702 static int
   11703 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11704 {
   11705 	struct wm_softc *sc = device_private(dev);
   11706 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11707 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11708 	int rv;
   11709 
   11710 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11711 
   11712 	/* Page 800 works differently than the rest so it has its own func */
   11713 	if (page == BM_WUC_PAGE)
   11714 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11715 		    false);
   11716 
   11717 	/*
   11718 	 * Pages between 1 and 767 work differently from the rest and are
   11719 	 * not handled here.
   11720 	 */
   11721 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11722 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11723 		return -1;
   11724 	}
   11725 
   11726 	{
   11727 		/*
   11728 		 * XXX I21[789] documents say that the SMBus Address register
   11729 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11730 		 */
   11731 		if (page == HV_INTC_FC_PAGE_START)
   11732 			page = 0;
   11733 
   11734 		/*
   11735 		 * XXX Workaround MDIO accesses being disabled after entering
   11736 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11737 		 * register is set)
   11738 		 */
   11739 		if (sc->sc_phytype == WMPHY_82578) {
   11740 			struct mii_softc *child;
   11741 
   11742 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11743 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11744 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11745 			    && ((val & (1 << 11)) != 0)) {
   11746 				device_printf(dev, "XXX need workaround\n");
   11747 			}
   11748 		}
   11749 
   11750 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11751 			rv = wm_gmii_mdic_writereg(dev, 1,
   11752 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11753 			if (rv != 0)
   11754 				return rv;
   11755 		}
   11756 	}
   11757 
   11758 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11759 }
   11760 
   11761 /*
   11762  * wm_gmii_82580_readreg:	[mii interface function]
   11763  *
   11764  *	Read a PHY register on the 82580 and I350.
   11765  * This could be handled by the PHY layer if we didn't have to lock the
   11766  * resource ...
   11767  */
   11768 static int
   11769 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11770 {
   11771 	struct wm_softc *sc = device_private(dev);
   11772 	int rv;
   11773 
   11774 	if (sc->phy.acquire(sc) != 0) {
   11775 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11776 		return -1;
   11777 	}
   11778 
   11779 #ifdef DIAGNOSTIC
   11780 	if (reg > MII_ADDRMASK) {
   11781 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11782 		    __func__, sc->sc_phytype, reg);
   11783 		reg &= MII_ADDRMASK;
   11784 	}
   11785 #endif
   11786 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11787 
   11788 	sc->phy.release(sc);
   11789 	return rv;
   11790 }
   11791 
   11792 /*
   11793  * wm_gmii_82580_writereg:	[mii interface function]
   11794  *
   11795  *	Write a PHY register on the 82580 and I350.
   11796  * This could be handled by the PHY layer if we didn't have to lock the
   11797  * resource ...
   11798  */
   11799 static int
   11800 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11801 {
   11802 	struct wm_softc *sc = device_private(dev);
   11803 	int rv;
   11804 
   11805 	if (sc->phy.acquire(sc) != 0) {
   11806 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11807 		return -1;
   11808 	}
   11809 
   11810 #ifdef DIAGNOSTIC
   11811 	if (reg > MII_ADDRMASK) {
   11812 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11813 		    __func__, sc->sc_phytype, reg);
   11814 		reg &= MII_ADDRMASK;
   11815 	}
   11816 #endif
   11817 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11818 
   11819 	sc->phy.release(sc);
   11820 	return rv;
   11821 }
   11822 
   11823 /*
   11824  * wm_gmii_gs40g_readreg:	[mii interface function]
   11825  *
   11826  *	Read a PHY register on the I210 and I211.
   11827  * This could be handled by the PHY layer if we didn't have to lock the
   11828  * resource ...
   11829  */
   11830 static int
   11831 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11832 {
   11833 	struct wm_softc *sc = device_private(dev);
   11834 	int page, offset;
   11835 	int rv;
   11836 
   11837 	/* Acquire semaphore */
   11838 	if (sc->phy.acquire(sc)) {
   11839 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11840 		return -1;
   11841 	}
   11842 
   11843 	/* Page select */
   11844 	page = reg >> GS40G_PAGE_SHIFT;
   11845 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11846 	if (rv != 0)
   11847 		goto release;
   11848 
   11849 	/* Read reg */
   11850 	offset = reg & GS40G_OFFSET_MASK;
   11851 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11852 
   11853 release:
   11854 	sc->phy.release(sc);
   11855 	return rv;
   11856 }
   11857 
   11858 /*
   11859  * wm_gmii_gs40g_writereg:	[mii interface function]
   11860  *
   11861  *	Write a PHY register on the I210 and I211.
   11862  * This could be handled by the PHY layer if we didn't have to lock the
   11863  * resource ...
   11864  */
   11865 static int
   11866 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11867 {
   11868 	struct wm_softc *sc = device_private(dev);
   11869 	uint16_t page;
   11870 	int offset, rv;
   11871 
   11872 	/* Acquire semaphore */
   11873 	if (sc->phy.acquire(sc)) {
   11874 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11875 		return -1;
   11876 	}
   11877 
   11878 	/* Page select */
   11879 	page = reg >> GS40G_PAGE_SHIFT;
   11880 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11881 	if (rv != 0)
   11882 		goto release;
   11883 
   11884 	/* Write reg */
   11885 	offset = reg & GS40G_OFFSET_MASK;
   11886 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11887 
   11888 release:
   11889 	/* Release semaphore */
   11890 	sc->phy.release(sc);
   11891 	return rv;
   11892 }
   11893 
   11894 /*
   11895  * wm_gmii_statchg:	[mii interface function]
   11896  *
   11897  *	Callback from MII layer when media changes.
   11898  */
   11899 static void
   11900 wm_gmii_statchg(struct ifnet *ifp)
   11901 {
   11902 	struct wm_softc *sc = ifp->if_softc;
   11903 	struct mii_data *mii = &sc->sc_mii;
   11904 
   11905 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11906 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11907 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11908 
   11909 	/* Get flow control negotiation result. */
   11910 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11911 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11912 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11913 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11914 	}
   11915 
   11916 	if (sc->sc_flowflags & IFM_FLOW) {
   11917 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11918 			sc->sc_ctrl |= CTRL_TFCE;
   11919 			sc->sc_fcrtl |= FCRTL_XONE;
   11920 		}
   11921 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11922 			sc->sc_ctrl |= CTRL_RFCE;
   11923 	}
   11924 
   11925 	if (mii->mii_media_active & IFM_FDX) {
   11926 		DPRINTF(sc, WM_DEBUG_LINK,
   11927 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11928 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11929 	} else {
   11930 		DPRINTF(sc, WM_DEBUG_LINK,
   11931 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11932 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11933 	}
   11934 
   11935 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11936 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11937 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11938 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11939 	if (sc->sc_type == WM_T_80003) {
   11940 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11941 		case IFM_1000_T:
   11942 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11943 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11944 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11945 			break;
   11946 		default:
   11947 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11948 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11949 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11950 			break;
   11951 		}
   11952 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11953 	}
   11954 }
   11955 
   11956 /* kumeran related (80003, ICH* and PCH*) */
   11957 
   11958 /*
   11959  * wm_kmrn_readreg:
   11960  *
   11961  *	Read a kumeran register
   11962  */
   11963 static int
   11964 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11965 {
   11966 	int rv;
   11967 
   11968 	if (sc->sc_type == WM_T_80003)
   11969 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11970 	else
   11971 		rv = sc->phy.acquire(sc);
   11972 	if (rv != 0) {
   11973 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11974 		    __func__);
   11975 		return rv;
   11976 	}
   11977 
   11978 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11979 
   11980 	if (sc->sc_type == WM_T_80003)
   11981 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11982 	else
   11983 		sc->phy.release(sc);
   11984 
   11985 	return rv;
   11986 }
   11987 
   11988 static int
   11989 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11990 {
   11991 
   11992 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11993 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11994 	    KUMCTRLSTA_REN);
   11995 	CSR_WRITE_FLUSH(sc);
   11996 	delay(2);
   11997 
   11998 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11999 
   12000 	return 0;
   12001 }
   12002 
   12003 /*
   12004  * wm_kmrn_writereg:
   12005  *
   12006  *	Write a kumeran register
   12007  */
   12008 static int
   12009 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12010 {
   12011 	int rv;
   12012 
   12013 	if (sc->sc_type == WM_T_80003)
   12014 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12015 	else
   12016 		rv = sc->phy.acquire(sc);
   12017 	if (rv != 0) {
   12018 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12019 		    __func__);
   12020 		return rv;
   12021 	}
   12022 
   12023 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12024 
   12025 	if (sc->sc_type == WM_T_80003)
   12026 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12027 	else
   12028 		sc->phy.release(sc);
   12029 
   12030 	return rv;
   12031 }
   12032 
   12033 static int
   12034 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12035 {
   12036 
   12037 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12038 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12039 
   12040 	return 0;
   12041 }
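
/*
 * Unlike an MDIC transaction, a kumeran access is a single write to the
 * KUMCTRLSTA register (reads set KUMCTRLSTA_REN and fetch the data after
 * a short settle), so no ready-bit polling is involved.  Illustrative
 * caller (not compiled), using constants that appear in wm_gmii_statchg()
 * above:
 */
#if 0
	uint16_t hd_ctrl;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl) == 0)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif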
   12042 
   12043 /*
   12044  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
   12045  * This access method is different from IEEE MMD.
   12046  */
   12047 static int
   12048 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12049 {
   12050 	struct wm_softc *sc = device_private(dev);
   12051 	int rv;
   12052 
   12053 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12054 	if (rv != 0)
   12055 		return rv;
   12056 
   12057 	if (rd)
   12058 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12059 	else
   12060 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12061 	return rv;
   12062 }
   12063 
   12064 static int
   12065 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12066 {
   12067 
   12068 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12069 }
   12070 
   12071 static int
   12072 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12073 {
   12074 
   12075 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12076 }
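
/*
 * EMI registers are reached indirectly through an address/data register
 * pair at PHY address 2, so these helpers require the PHY lock to be held
 * already.  Hedged sketch (not compiled; EXAMPLE_EMI_REG is a placeholder
 * name, not a real offset from this driver's headers):
 */
#if 0
	uint16_t data;

	if (sc->phy.acquire(sc) == 0) {
		wm_read_emi_reg_locked(dev, EXAMPLE_EMI_REG, &data);
		sc->phy.release(sc);
	}
#endif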
   12077 
   12078 /* SGMII related */
   12079 
   12080 /*
   12081  * wm_sgmii_uses_mdio
   12082  *
   12083  * Check whether the transaction is to the internal PHY or the external
   12084  * MDIO interface. Return true if it's MDIO.
   12085  */
   12086 static bool
   12087 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12088 {
   12089 	uint32_t reg;
   12090 	bool ismdio = false;
   12091 
   12092 	switch (sc->sc_type) {
   12093 	case WM_T_82575:
   12094 	case WM_T_82576:
   12095 		reg = CSR_READ(sc, WMREG_MDIC);
   12096 		ismdio = ((reg & MDIC_DEST) != 0);
   12097 		break;
   12098 	case WM_T_82580:
   12099 	case WM_T_I350:
   12100 	case WM_T_I354:
   12101 	case WM_T_I210:
   12102 	case WM_T_I211:
   12103 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12104 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12105 		break;
   12106 	default:
   12107 		break;
   12108 	}
   12109 
   12110 	return ismdio;
   12111 }
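
/*
 * Hedged sketch of dispatching on the result above (not compiled; the
 * driver's real attach-time selection logic lives outside this section):
 */
#if 0
	uint16_t data;
	int rv;

	if (wm_sgmii_uses_mdio(sc))
		rv = wm_gmii_mdic_readreg(dev, phy, reg, &data);
	else
		rv = wm_sgmii_readreg(dev, phy, reg, &data);
#endif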
   12112 
   12113 /* Setup internal SGMII PHY for SFP */
   12114 static void
   12115 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12116 {
   12117 	uint16_t id1, id2, phyreg;
   12118 	int i, rv;
   12119 
   12120 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12121 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12122 		return;
   12123 
   12124 	for (i = 0; i < MII_NPHY; i++) {
   12125 		sc->phy.no_errprint = true;
   12126 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12127 		if (rv != 0)
   12128 			continue;
   12129 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12130 		if (rv != 0)
   12131 			continue;
   12132 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12133 			continue;
   12134 		sc->phy.no_errprint = false;
   12135 
   12136 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12137 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12138 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12139 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12140 		break;
   12141 	}
   12143 }
   12144 
   12145 /*
   12146  * wm_sgmii_readreg:	[mii interface function]
   12147  *
   12148  *	Read a PHY register on the SGMII
   12149  * This could be handled by the PHY layer if we didn't have to lock the
   12150  * resource ...
   12151  */
   12152 static int
   12153 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12154 {
   12155 	struct wm_softc *sc = device_private(dev);
   12156 	int rv;
   12157 
   12158 	if (sc->phy.acquire(sc)) {
   12159 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12160 		return -1;
   12161 	}
   12162 
   12163 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12164 
   12165 	sc->phy.release(sc);
   12166 	return rv;
   12167 }
   12168 
   12169 static int
   12170 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12171 {
   12172 	struct wm_softc *sc = device_private(dev);
   12173 	uint32_t i2ccmd;
   12174 	int i, rv = 0;
   12175 
   12176 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12177 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12178 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12179 
   12180 	/* Poll the ready bit */
   12181 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12182 		delay(50);
   12183 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12184 		if (i2ccmd & I2CCMD_READY)
   12185 			break;
   12186 	}
   12187 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12188 		device_printf(dev, "I2CCMD Read did not complete\n");
   12189 		rv = ETIMEDOUT;
   12190 	}
   12191 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12192 		if (!sc->phy.no_errprint)
   12193 			device_printf(dev, "I2CCMD Error bit set\n");
   12194 		rv = EIO;
   12195 	}
   12196 
   12197 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12198 
   12199 	return rv;
   12200 }
   12201 
   12202 /*
   12203  * wm_sgmii_writereg:	[mii interface function]
   12204  *
   12205  *	Write a PHY register on the SGMII.
   12206  * This could be handled by the PHY layer if we didn't have to lock the
   12207  * resource ...
   12208  */
   12209 static int
   12210 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12211 {
   12212 	struct wm_softc *sc = device_private(dev);
   12213 	int rv;
   12214 
   12215 	if (sc->phy.acquire(sc) != 0) {
   12216 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12217 		return -1;
   12218 	}
   12219 
   12220 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12221 
   12222 	sc->phy.release(sc);
   12223 
   12224 	return rv;
   12225 }
   12226 
   12227 static int
   12228 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12229 {
   12230 	struct wm_softc *sc = device_private(dev);
   12231 	uint32_t i2ccmd;
   12232 	uint16_t swapdata;
   12233 	int rv = 0;
   12234 	int i;
   12235 
   12236 	/* Swap the data bytes for the I2C interface */
   12237 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12238 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12239 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12240 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12241 
   12242 	/* Poll the ready bit */
   12243 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12244 		delay(50);
   12245 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12246 		if (i2ccmd & I2CCMD_READY)
   12247 			break;
   12248 	}
   12249 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12250 		device_printf(dev, "I2CCMD Write did not complete\n");
   12251 		rv = ETIMEDOUT;
   12252 	}
   12253 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12254 		device_printf(dev, "I2CCMD Error bit set\n");
   12255 		rv = EIO;
   12256 	}
   12257 
   12258 	return rv;
   12259 }
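
/*
 * Worked example of the byte swap in the two SGMII functions above: the
 * I2CCMD data field carries the 16-bit word in I2C wire order, so a PHY
 * value of 0x1234 is written as swapdata == 0x3412, and a raw read of
 * 0x3412 is swapped back to 0x1234 by wm_sgmii_readreg_locked().
 */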
   12260 
   12261 /* TBI related */
   12262 
   12263 static bool
   12264 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12265 {
   12266 	bool sig;
   12267 
   12268 	sig = ctrl & CTRL_SWDPIN(1);
   12269 
   12270 	/*
   12271 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12272 	 * detect a signal, 1 if they don't.
   12273 	 */
   12274 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12275 		sig = !sig;
   12276 
   12277 	return sig;
   12278 }
   12279 
   12280 /*
   12281  * wm_tbi_mediainit:
   12282  *
   12283  *	Initialize media for use on 1000BASE-X devices.
   12284  */
   12285 static void
   12286 wm_tbi_mediainit(struct wm_softc *sc)
   12287 {
   12288 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12289 	const char *sep = "";
   12290 
   12291 	if (sc->sc_type < WM_T_82543)
   12292 		sc->sc_tipg = TIPG_WM_DFLT;
   12293 	else
   12294 		sc->sc_tipg = TIPG_LG_DFLT;
   12295 
   12296 	sc->sc_tbi_serdes_anegticks = 5;
   12297 
   12298 	/* Initialize our media structures */
   12299 	sc->sc_mii.mii_ifp = ifp;
   12300 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12301 
   12302 	ifp->if_baudrate = IF_Gbps(1);
   12303 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12304 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12305 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12306 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12307 		    sc->sc_core_lock);
   12308 	} else {
   12309 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12310 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12311 	}
   12312 
   12313 	/*
   12314 	 * SWD Pins:
   12315 	 *
   12316 	 *	0 = Link LED (output)
   12317 	 *	1 = Loss Of Signal (input)
   12318 	 */
   12319 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12320 
   12321 	/* XXX Perhaps this is only for TBI */
   12322 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12323 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12324 
   12325 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12326 		sc->sc_ctrl &= ~CTRL_LRST;
   12327 
   12328 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12329 
   12330 #define	ADD(ss, mm, dd)							\
   12331 do {									\
   12332 	aprint_normal("%s%s", sep, ss);					\
   12333 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12334 	sep = ", ";							\
   12335 } while (/*CONSTCOND*/0)
   12336 
   12337 	aprint_normal_dev(sc->sc_dev, "");
   12338 
   12339 	if (sc->sc_type == WM_T_I354) {
   12340 		uint32_t status;
   12341 
   12342 		status = CSR_READ(sc, WMREG_STATUS);
   12343 		if (((status & STATUS_2P5_SKU) != 0)
   12344 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12345 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12346 		} else
   12347 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12348 	} else if (sc->sc_type == WM_T_82545) {
   12349 		/* Only 82545 is LX (XXX except SFP) */
   12350 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12351 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12352 	} else if (sc->sc_sfptype != 0) {
   12353 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12354 		switch (sc->sc_sfptype) {
   12355 		default:
   12356 		case SFF_SFP_ETH_FLAGS_1000SX:
   12357 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12358 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12359 			break;
   12360 		case SFF_SFP_ETH_FLAGS_1000LX:
   12361 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12362 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12363 			break;
   12364 		case SFF_SFP_ETH_FLAGS_1000CX:
   12365 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12366 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12367 			break;
   12368 		case SFF_SFP_ETH_FLAGS_1000T:
   12369 			ADD("1000baseT", IFM_1000_T, 0);
   12370 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12371 			break;
   12372 		case SFF_SFP_ETH_FLAGS_100FX:
   12373 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12374 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12375 			break;
   12376 		}
   12377 	} else {
   12378 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12379 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12380 	}
   12381 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12382 	aprint_normal("\n");
   12383 
   12384 #undef ADD
   12385 
   12386 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12387 }
   12388 
   12389 /*
   12390  * wm_tbi_mediachange:	[ifmedia interface function]
   12391  *
   12392  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12393  */
   12394 static int
   12395 wm_tbi_mediachange(struct ifnet *ifp)
   12396 {
   12397 	struct wm_softc *sc = ifp->if_softc;
   12398 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12399 	uint32_t status, ctrl;
   12400 	bool signal;
   12401 	int i;
   12402 
   12403 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12404 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12405 		/* XXX need some work for >= 82571 and < 82575 */
   12406 		if (sc->sc_type < WM_T_82575)
   12407 			return 0;
   12408 	}
   12409 
   12410 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12411 	    || (sc->sc_type >= WM_T_82575))
   12412 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12413 
   12414 	sc->sc_ctrl &= ~CTRL_LRST;
   12415 	sc->sc_txcw = TXCW_ANE;
   12416 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12417 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12418 	else if (ife->ifm_media & IFM_FDX)
   12419 		sc->sc_txcw |= TXCW_FD;
   12420 	else
   12421 		sc->sc_txcw |= TXCW_HD;
   12422 
   12423 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12424 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12425 
   12426 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12427 		device_xname(sc->sc_dev), sc->sc_txcw));
   12428 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12429 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12430 	CSR_WRITE_FLUSH(sc);
   12431 	delay(1000);
   12432 
   12433 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12434 	signal = wm_tbi_havesignal(sc, ctrl);
   12435 
   12436 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12437 		signal));
   12438 
   12439 	if (signal) {
   12440 		/* Have signal; wait for the link to come up. */
   12441 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12442 			delay(10000);
   12443 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12444 				break;
   12445 		}
   12446 
   12447 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12448 			device_xname(sc->sc_dev), i));
   12449 
   12450 		status = CSR_READ(sc, WMREG_STATUS);
   12451 		DPRINTF(sc, WM_DEBUG_LINK,
   12452 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12453 			device_xname(sc->sc_dev), status, STATUS_LU));
   12454 		if (status & STATUS_LU) {
   12455 			/* Link is up. */
   12456 			DPRINTF(sc, WM_DEBUG_LINK,
   12457 			    ("%s: LINK: set media -> link up %s\n",
   12458 				device_xname(sc->sc_dev),
   12459 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12460 
   12461 			/*
   12462 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   12463 			 * automatically, so we should update sc->sc_ctrl.
   12464 			 */
   12465 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12466 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12467 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12468 			if (status & STATUS_FD)
   12469 				sc->sc_tctl |=
   12470 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12471 			else
   12472 				sc->sc_tctl |=
   12473 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12474 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12475 				sc->sc_fcrtl |= FCRTL_XONE;
   12476 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12477 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12478 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12479 			sc->sc_tbi_linkup = 1;
   12480 		} else {
   12481 			if (i == WM_LINKUP_TIMEOUT)
   12482 				wm_check_for_link(sc);
   12483 			/* Link is down. */
   12484 			DPRINTF(sc, WM_DEBUG_LINK,
   12485 			    ("%s: LINK: set media -> link down\n",
   12486 				device_xname(sc->sc_dev)));
   12487 			sc->sc_tbi_linkup = 0;
   12488 		}
   12489 	} else {
   12490 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12491 			device_xname(sc->sc_dev)));
   12492 		sc->sc_tbi_linkup = 0;
   12493 	}
   12494 
   12495 	wm_tbi_serdes_set_linkled(sc);
   12496 
   12497 	return 0;
   12498 }
   12499 
   12500 /*
   12501  * wm_tbi_mediastatus:	[ifmedia interface function]
   12502  *
   12503  *	Get the current interface media status on a 1000BASE-X device.
   12504  */
   12505 static void
   12506 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12507 {
   12508 	struct wm_softc *sc = ifp->if_softc;
   12509 	uint32_t ctrl, status;
   12510 
   12511 	ifmr->ifm_status = IFM_AVALID;
   12512 	ifmr->ifm_active = IFM_ETHER;
   12513 
   12514 	status = CSR_READ(sc, WMREG_STATUS);
   12515 	if ((status & STATUS_LU) == 0) {
   12516 		ifmr->ifm_active |= IFM_NONE;
   12517 		return;
   12518 	}
   12519 
   12520 	ifmr->ifm_status |= IFM_ACTIVE;
   12521 	/* Only 82545 is LX */
   12522 	if (sc->sc_type == WM_T_82545)
   12523 		ifmr->ifm_active |= IFM_1000_LX;
   12524 	else
   12525 		ifmr->ifm_active |= IFM_1000_SX;
   12526 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12527 		ifmr->ifm_active |= IFM_FDX;
   12528 	else
   12529 		ifmr->ifm_active |= IFM_HDX;
   12530 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12531 	if (ctrl & CTRL_RFCE)
   12532 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12533 	if (ctrl & CTRL_TFCE)
   12534 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12535 }
   12536 
   12537 /* XXX TBI only */
   12538 static int
   12539 wm_check_for_link(struct wm_softc *sc)
   12540 {
   12541 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12542 	uint32_t rxcw;
   12543 	uint32_t ctrl;
   12544 	uint32_t status;
   12545 	bool signal;
   12546 
   12547 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12548 		device_xname(sc->sc_dev), __func__));
   12549 
   12550 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12551 		/* XXX need some work for >= 82571 */
   12552 		if (sc->sc_type >= WM_T_82571) {
   12553 			sc->sc_tbi_linkup = 1;
   12554 			return 0;
   12555 		}
   12556 	}
   12557 
   12558 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12559 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12560 	status = CSR_READ(sc, WMREG_STATUS);
   12561 	signal = wm_tbi_havesignal(sc, ctrl);
   12562 
   12563 	DPRINTF(sc, WM_DEBUG_LINK,
   12564 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12565 		device_xname(sc->sc_dev), __func__, signal,
   12566 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12567 
   12568 	/*
   12569 	 * SWDPIN   LU RXCW
   12570 	 *	0    0	  0
   12571 	 *	0    0	  1	(should not happen)
   12572 	 *	0    1	  0	(should not happen)
   12573 	 *	0    1	  1	(should not happen)
   12574 	 *	1    0	  0	Disable autonego and force linkup
   12575 	 *	1    0	  1	got /C/ but not linkup yet
   12576 	 *	1    1	  0	(linkup)
   12577 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12578 	 *
   12579 	 */
   12580 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12581 		DPRINTF(sc, WM_DEBUG_LINK,
   12582 		    ("%s: %s: force linkup and fullduplex\n",
   12583 			device_xname(sc->sc_dev), __func__));
   12584 		sc->sc_tbi_linkup = 0;
   12585 		/* Disable auto-negotiation in the TXCW register */
   12586 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12587 
   12588 		/*
   12589 		 * Force link-up and also force full-duplex.
   12590 		 *
   12591 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
   12592 		 * automatically, so we should update sc->sc_ctrl.
   12593 		 */
   12594 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12595 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12596 	} else if (((status & STATUS_LU) != 0)
   12597 	    && ((rxcw & RXCW_C) != 0)
   12598 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12599 		sc->sc_tbi_linkup = 1;
   12600 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12601 			device_xname(sc->sc_dev),
   12602 			__func__));
   12603 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12604 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12605 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12606 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   12607 			device_xname(sc->sc_dev), __func__));
   12608 	} else {
   12609 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12610 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12611 			status));
   12612 	}
   12613 
   12614 	return 0;
   12615 }
   12616 
   12617 /*
   12618  * wm_tbi_tick:
   12619  *
   12620  *	Check the link on TBI devices.
   12621  *	This function acts as mii_tick().
   12622  */
   12623 static void
   12624 wm_tbi_tick(struct wm_softc *sc)
   12625 {
   12626 	struct mii_data *mii = &sc->sc_mii;
   12627 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12628 	uint32_t status;
   12629 
   12630 	KASSERT(WM_CORE_LOCKED(sc));
   12631 
   12632 	status = CSR_READ(sc, WMREG_STATUS);
   12633 
   12634 	/* XXX is this needed? */
   12635 	(void)CSR_READ(sc, WMREG_RXCW);
   12636 	(void)CSR_READ(sc, WMREG_CTRL);
   12637 
   12638 	/* set link status */
   12639 	if ((status & STATUS_LU) == 0) {
   12640 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12641 			device_xname(sc->sc_dev)));
   12642 		sc->sc_tbi_linkup = 0;
   12643 	} else if (sc->sc_tbi_linkup == 0) {
   12644 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12645 			device_xname(sc->sc_dev),
   12646 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12647 		sc->sc_tbi_linkup = 1;
   12648 		sc->sc_tbi_serdes_ticks = 0;
   12649 	}
   12650 
   12651 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12652 		goto setled;
   12653 
   12654 	if ((status & STATUS_LU) == 0) {
   12655 		sc->sc_tbi_linkup = 0;
   12656 		/* If the timer expired, retry autonegotiation */
   12657 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12658 		    && (++sc->sc_tbi_serdes_ticks
   12659 			>= sc->sc_tbi_serdes_anegticks)) {
   12660 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12661 				device_xname(sc->sc_dev), __func__));
   12662 			sc->sc_tbi_serdes_ticks = 0;
   12663 			/*
   12664 			 * Reset the link, and let autonegotiation do
   12665 			 * its thing
   12666 			 */
   12667 			sc->sc_ctrl |= CTRL_LRST;
   12668 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12669 			CSR_WRITE_FLUSH(sc);
   12670 			delay(1000);
   12671 			sc->sc_ctrl &= ~CTRL_LRST;
   12672 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12673 			CSR_WRITE_FLUSH(sc);
   12674 			delay(1000);
   12675 			CSR_WRITE(sc, WMREG_TXCW,
   12676 			    sc->sc_txcw & ~TXCW_ANE);
   12677 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12678 		}
   12679 	}
   12680 
   12681 setled:
   12682 	wm_tbi_serdes_set_linkled(sc);
   12683 }
   12684 
   12685 /* SERDES related */
   12686 static void
   12687 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12688 {
   12689 	uint32_t reg;
   12690 
   12691 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12692 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12693 		return;
   12694 
   12695 	/* Enable PCS to turn on link */
   12696 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12697 	reg |= PCS_CFG_PCS_EN;
   12698 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12699 
   12700 	/* Power up the laser */
   12701 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12702 	reg &= ~CTRL_EXT_SWDPIN(3);
   12703 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12704 
   12705 	/* Flush the write to verify completion */
   12706 	CSR_WRITE_FLUSH(sc);
   12707 	delay(1000);
   12708 }
   12709 
   12710 static int
   12711 wm_serdes_mediachange(struct ifnet *ifp)
   12712 {
   12713 	struct wm_softc *sc = ifp->if_softc;
   12714 	bool pcs_autoneg = true; /* XXX */
   12715 	uint32_t ctrl_ext, pcs_lctl, reg;
   12716 
   12717 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12718 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12719 		return 0;
   12720 
   12721 	/* XXX Currently, this function is not called on 8257[12] */
   12722 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12723 	    || (sc->sc_type >= WM_T_82575))
   12724 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12725 
   12726 	/* Power on the sfp cage if present */
   12727 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12728 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12729 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12730 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12731 
   12732 	sc->sc_ctrl |= CTRL_SLU;
   12733 
   12734 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12735 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12736 
   12737 		reg = CSR_READ(sc, WMREG_CONNSW);
   12738 		reg |= CONNSW_ENRGSRC;
   12739 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12740 	}
   12741 
   12742 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12743 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12744 	case CTRL_EXT_LINK_MODE_SGMII:
   12745 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12746 		pcs_autoneg = true;
   12747 		/* Autoneg time out should be disabled for SGMII mode */
   12748 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12749 		break;
   12750 	case CTRL_EXT_LINK_MODE_1000KX:
   12751 		pcs_autoneg = false;
   12752 		/* FALLTHROUGH */
   12753 	default:
   12754 		if ((sc->sc_type == WM_T_82575)
   12755 		    || (sc->sc_type == WM_T_82576)) {
   12756 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12757 				pcs_autoneg = false;
   12758 		}
   12759 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12760 		    | CTRL_FRCFDX;
   12761 
   12762 		/* Set speed of 1000/Full if speed/duplex is forced */
   12763 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12764 	}
   12765 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12766 
   12767 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12768 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12769 
   12770 	if (pcs_autoneg) {
   12771 		/* Set PCS register for autoneg */
   12772 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12773 
   12774 		/* Disable force flow control for autoneg */
   12775 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12776 
   12777 		/* Configure flow control advertisement for autoneg */
   12778 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12779 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12780 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12781 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12782 	} else
   12783 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12784 
   12785 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12786 
   12787 	return 0;
   12788 }
   12789 
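/*
 * wm_serdes_mediastatus:
 *
 *	Get the current interface media status on SERDES devices.
 */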
   12790 static void
   12791 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12792 {
   12793 	struct wm_softc *sc = ifp->if_softc;
   12794 	struct mii_data *mii = &sc->sc_mii;
   12795 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12796 	uint32_t pcs_adv, pcs_lpab, reg;
   12797 
   12798 	ifmr->ifm_status = IFM_AVALID;
   12799 	ifmr->ifm_active = IFM_ETHER;
   12800 
   12801 	/* Check PCS */
   12802 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12803 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12804 		ifmr->ifm_active |= IFM_NONE;
   12805 		sc->sc_tbi_linkup = 0;
   12806 		goto setled;
   12807 	}
   12808 
   12809 	sc->sc_tbi_linkup = 1;
   12810 	ifmr->ifm_status |= IFM_ACTIVE;
   12811 	if (sc->sc_type == WM_T_I354) {
   12812 		uint32_t status;
   12813 
   12814 		status = CSR_READ(sc, WMREG_STATUS);
   12815 		if (((status & STATUS_2P5_SKU) != 0)
   12816 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12817 			ifmr->ifm_active |= IFM_2500_KX;
   12818 		} else
   12819 			ifmr->ifm_active |= IFM_1000_KX;
   12820 	} else {
   12821 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12822 		case PCS_LSTS_SPEED_10:
   12823 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12824 			break;
   12825 		case PCS_LSTS_SPEED_100:
   12826 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12827 			break;
   12828 		case PCS_LSTS_SPEED_1000:
   12829 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12830 			break;
   12831 		default:
   12832 			device_printf(sc->sc_dev, "Unknown speed\n");
   12833 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12834 			break;
   12835 		}
   12836 	}
   12837 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12838 	if ((reg & PCS_LSTS_FDX) != 0)
   12839 		ifmr->ifm_active |= IFM_FDX;
   12840 	else
   12841 		ifmr->ifm_active |= IFM_HDX;
   12842 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12843 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12844 		/* Check flow */
   12845 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12846 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12847 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12848 			goto setled;
   12849 		}
   12850 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12851 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12852 		DPRINTF(sc, WM_DEBUG_LINK,
   12853 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12854 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12855 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12856 			mii->mii_media_active |= IFM_FLOW
   12857 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12858 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12859 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12860 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12861 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12862 			mii->mii_media_active |= IFM_FLOW
   12863 			    | IFM_ETH_TXPAUSE;
   12864 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12865 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12866 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12867 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12868 			mii->mii_media_active |= IFM_FLOW
   12869 			    | IFM_ETH_RXPAUSE;
   12870 		}
   12871 	}
   12872 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12873 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12874 setled:
   12875 	wm_tbi_serdes_set_linkled(sc);
   12876 }
   12877 
   12878 /*
   12879  * wm_serdes_tick:
   12880  *
   12881  *	Check the link on serdes devices.
   12882  */
   12883 static void
   12884 wm_serdes_tick(struct wm_softc *sc)
   12885 {
   12886 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12887 	struct mii_data *mii = &sc->sc_mii;
   12888 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12889 	uint32_t reg;
   12890 
   12891 	KASSERT(WM_CORE_LOCKED(sc));
   12892 
   12893 	mii->mii_media_status = IFM_AVALID;
   12894 	mii->mii_media_active = IFM_ETHER;
   12895 
   12896 	/* Check PCS */
   12897 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12898 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12899 		mii->mii_media_status |= IFM_ACTIVE;
   12900 		sc->sc_tbi_linkup = 1;
   12901 		sc->sc_tbi_serdes_ticks = 0;
   12902 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12903 		if ((reg & PCS_LSTS_FDX) != 0)
   12904 			mii->mii_media_active |= IFM_FDX;
   12905 		else
   12906 			mii->mii_media_active |= IFM_HDX;
   12907 	} else {
		mii->mii_media_active |= IFM_NONE;
   12909 		sc->sc_tbi_linkup = 0;
   12910 		/* If the timer expired, retry autonegotiation */
   12911 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12912 		    && (++sc->sc_tbi_serdes_ticks
   12913 			>= sc->sc_tbi_serdes_anegticks)) {
   12914 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12915 				device_xname(sc->sc_dev), __func__));
   12916 			sc->sc_tbi_serdes_ticks = 0;
   12917 			/* XXX */
   12918 			wm_serdes_mediachange(ifp);
   12919 		}
   12920 	}
   12921 
   12922 	wm_tbi_serdes_set_linkled(sc);
   12923 }
   12924 
   12925 /* SFP related */
   12926 
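/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register, polling for completion of the I2C transaction.
 */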
   12927 static int
   12928 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12929 {
   12930 	uint32_t i2ccmd;
   12931 	int i;
   12932 
   12933 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12934 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12935 
   12936 	/* Poll the ready bit */
   12937 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12938 		delay(50);
   12939 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12940 		if (i2ccmd & I2CCMD_READY)
   12941 			break;
   12942 	}
   12943 	if ((i2ccmd & I2CCMD_READY) == 0)
   12944 		return -1;
   12945 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12946 		return -1;
   12947 
   12948 	*data = i2ccmd & 0x00ff;
   12949 
   12950 	return 0;
   12951 }
   12952 
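/*
 * wm_sfp_get_media_type:
 *
 *	Identify the media type from the SFP module's identifier and
 *	Ethernet compliance code bytes.
 */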
   12953 static uint32_t
   12954 wm_sfp_get_media_type(struct wm_softc *sc)
   12955 {
   12956 	uint32_t ctrl_ext;
   12957 	uint8_t val = 0;
   12958 	int timeout = 3;
   12959 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12960 	int rv = -1;
   12961 
   12962 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12963 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12964 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12965 	CSR_WRITE_FLUSH(sc);
   12966 
   12967 	/* Read SFP module data */
   12968 	while (timeout) {
   12969 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12970 		if (rv == 0)
   12971 			break;
   12972 		delay(100*1000); /* XXX too big */
   12973 		timeout--;
   12974 	}
   12975 	if (rv != 0)
   12976 		goto out;
   12977 
   12978 	switch (val) {
   12979 	case SFF_SFP_ID_SFF:
   12980 		aprint_normal_dev(sc->sc_dev,
   12981 		    "Module/Connector soldered to board\n");
   12982 		break;
   12983 	case SFF_SFP_ID_SFP:
   12984 		sc->sc_flags |= WM_F_SFP;
   12985 		break;
   12986 	case SFF_SFP_ID_UNKNOWN:
   12987 		goto out;
   12988 	default:
   12989 		break;
   12990 	}
   12991 
   12992 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12993 	if (rv != 0)
   12994 		goto out;
   12995 
   12996 	sc->sc_sfptype = val;
   12997 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12998 		mediatype = WM_MEDIATYPE_SERDES;
   12999 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13000 		sc->sc_flags |= WM_F_SGMII;
   13001 		mediatype = WM_MEDIATYPE_COPPER;
   13002 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13003 		sc->sc_flags |= WM_F_SGMII;
   13004 		mediatype = WM_MEDIATYPE_SERDES;
   13005 	} else {
   13006 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13007 		    __func__, sc->sc_sfptype);
   13008 		sc->sc_sfptype = 0; /* XXX unknown */
   13009 	}
   13010 
   13011 out:
   13012 	/* Restore I2C interface setting */
   13013 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13014 
   13015 	return mediatype;
   13016 }
   13017 
   13018 /*
   13019  * NVM related.
   13020  * Microwire, SPI (w/wo EERD) and Flash.
   13021  */
   13022 
   13023 /* Both spi and uwire */
   13024 
   13025 /*
   13026  * wm_eeprom_sendbits:
   13027  *
   13028  *	Send a series of bits to the EEPROM.
   13029  */
   13030 static void
   13031 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13032 {
   13033 	uint32_t reg;
   13034 	int x;
   13035 
   13036 	reg = CSR_READ(sc, WMREG_EECD);
   13037 
   13038 	for (x = nbits; x > 0; x--) {
   13039 		if (bits & (1U << (x - 1)))
   13040 			reg |= EECD_DI;
   13041 		else
   13042 			reg &= ~EECD_DI;
   13043 		CSR_WRITE(sc, WMREG_EECD, reg);
   13044 		CSR_WRITE_FLUSH(sc);
   13045 		delay(2);
   13046 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13047 		CSR_WRITE_FLUSH(sc);
   13048 		delay(2);
   13049 		CSR_WRITE(sc, WMREG_EECD, reg);
   13050 		CSR_WRITE_FLUSH(sc);
   13051 		delay(2);
   13052 	}
   13053 }
   13054 
   13055 /*
   13056  * wm_eeprom_recvbits:
   13057  *
   13058  *	Receive a series of bits from the EEPROM.
   13059  */
   13060 static void
   13061 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13062 {
   13063 	uint32_t reg, val;
   13064 	int x;
   13065 
   13066 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13067 
   13068 	val = 0;
   13069 	for (x = nbits; x > 0; x--) {
   13070 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13071 		CSR_WRITE_FLUSH(sc);
   13072 		delay(2);
   13073 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13074 			val |= (1U << (x - 1));
   13075 		CSR_WRITE(sc, WMREG_EECD, reg);
   13076 		CSR_WRITE_FLUSH(sc);
   13077 		delay(2);
   13078 	}
   13079 	*valp = val;
   13080 }
   13081 
   13082 /* Microwire */
   13083 
   13084 /*
   13085  * wm_nvm_read_uwire:
   13086  *
   13087  *	Read a word from the EEPROM using the MicroWire protocol.
   13088  */
   13089 static int
   13090 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13091 {
   13092 	uint32_t reg, val;
   13093 	int i;
   13094 
   13095 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13096 		device_xname(sc->sc_dev), __func__));
   13097 
   13098 	if (sc->nvm.acquire(sc) != 0)
   13099 		return -1;
   13100 
   13101 	for (i = 0; i < wordcnt; i++) {
   13102 		/* Clear SK and DI. */
   13103 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13104 		CSR_WRITE(sc, WMREG_EECD, reg);
   13105 
   13106 		/*
   13107 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13108 		 * and Xen.
   13109 		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 emulation acts as an 82540.
   13112 		 */
   13113 		if (sc->sc_type == WM_T_82540) {
   13114 			reg |= EECD_SK;
   13115 			CSR_WRITE(sc, WMREG_EECD, reg);
   13116 			reg &= ~EECD_SK;
   13117 			CSR_WRITE(sc, WMREG_EECD, reg);
   13118 			CSR_WRITE_FLUSH(sc);
   13119 			delay(2);
   13120 		}
   13121 		/* XXX: end of workaround */
   13122 
   13123 		/* Set CHIP SELECT. */
   13124 		reg |= EECD_CS;
   13125 		CSR_WRITE(sc, WMREG_EECD, reg);
   13126 		CSR_WRITE_FLUSH(sc);
   13127 		delay(2);
   13128 
   13129 		/* Shift in the READ command. */
   13130 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13131 
   13132 		/* Shift in address. */
   13133 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13134 
   13135 		/* Shift out the data. */
   13136 		wm_eeprom_recvbits(sc, &val, 16);
   13137 		data[i] = val & 0xffff;
   13138 
   13139 		/* Clear CHIP SELECT. */
   13140 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13141 		CSR_WRITE(sc, WMREG_EECD, reg);
   13142 		CSR_WRITE_FLUSH(sc);
   13143 		delay(2);
   13144 	}
   13145 
   13146 	sc->nvm.release(sc);
   13147 	return 0;
   13148 }
   13149 
   13150 /* SPI */
   13151 
   13152 /*
   13153  * Set SPI and FLASH related information from the EECD register.
   13154  * For 82541 and 82547, the word size is taken from EEPROM.
   13155  */
   13156 static int
   13157 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13158 {
   13159 	int size;
   13160 	uint32_t reg;
	uint16_t data = 0;	/* 0 -> 64 words if the read below fails */
   13162 
   13163 	reg = CSR_READ(sc, WMREG_EECD);
   13164 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13165 
   13166 	/* Read the size of NVM from EECD by default */
   13167 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13168 	switch (sc->sc_type) {
   13169 	case WM_T_82541:
   13170 	case WM_T_82541_2:
   13171 	case WM_T_82547:
   13172 	case WM_T_82547_2:
   13173 		/* Set dummy value to access EEPROM */
   13174 		sc->sc_nvm_wordsize = 64;
   13175 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13176 			aprint_error_dev(sc->sc_dev,
   13177 			    "%s: failed to read EEPROM size\n", __func__);
   13178 		}
   13179 		reg = data;
   13180 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13181 		if (size == 0)
   13182 			size = 6; /* 64 word size */
   13183 		else
   13184 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13185 		break;
   13186 	case WM_T_80003:
   13187 	case WM_T_82571:
   13188 	case WM_T_82572:
   13189 	case WM_T_82573: /* SPI case */
   13190 	case WM_T_82574: /* SPI case */
   13191 	case WM_T_82583: /* SPI case */
   13192 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13193 		if (size > 14)
   13194 			size = 14;
   13195 		break;
   13196 	case WM_T_82575:
   13197 	case WM_T_82576:
   13198 	case WM_T_82580:
   13199 	case WM_T_I350:
   13200 	case WM_T_I354:
   13201 	case WM_T_I210:
   13202 	case WM_T_I211:
   13203 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13204 		if (size > 15)
   13205 			size = 15;
   13206 		break;
   13207 	default:
   13208 		aprint_error_dev(sc->sc_dev,
   13209 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13210 		return -1;
   13212 	}
   13213 
   13214 	sc->sc_nvm_wordsize = 1 << size;
   13215 
   13216 	return 0;
   13217 }
   13218 
   13219 /*
   13220  * wm_nvm_ready_spi:
   13221  *
   13222  *	Wait for a SPI EEPROM to be ready for commands.
   13223  */
   13224 static int
   13225 wm_nvm_ready_spi(struct wm_softc *sc)
   13226 {
   13227 	uint32_t val;
   13228 	int usec;
   13229 
   13230 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13231 		device_xname(sc->sc_dev), __func__));
   13232 
   13233 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13234 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13235 		wm_eeprom_recvbits(sc, &val, 8);
   13236 		if ((val & SPI_SR_RDY) == 0)
   13237 			break;
   13238 	}
   13239 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13241 		return -1;
   13242 	}
   13243 	return 0;
   13244 }
   13245 
   13246 /*
   13247  * wm_nvm_read_spi:
   13248  *
 *	Read a word from the EEPROM using the SPI protocol.
   13250  */
   13251 static int
   13252 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13253 {
   13254 	uint32_t reg, val;
   13255 	int i;
   13256 	uint8_t opc;
   13257 	int rv = 0;
   13258 
   13259 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13260 		device_xname(sc->sc_dev), __func__));
   13261 
   13262 	if (sc->nvm.acquire(sc) != 0)
   13263 		return -1;
   13264 
   13265 	/* Clear SK and CS. */
   13266 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13267 	CSR_WRITE(sc, WMREG_EECD, reg);
   13268 	CSR_WRITE_FLUSH(sc);
   13269 	delay(2);
   13270 
   13271 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13272 		goto out;
   13273 
   13274 	/* Toggle CS to flush commands. */
   13275 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13276 	CSR_WRITE_FLUSH(sc);
   13277 	delay(2);
   13278 	CSR_WRITE(sc, WMREG_EECD, reg);
   13279 	CSR_WRITE_FLUSH(sc);
   13280 	delay(2);
   13281 
   13282 	opc = SPI_OPC_READ;
   13283 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13284 		opc |= SPI_OPC_A8;
   13285 
   13286 	wm_eeprom_sendbits(sc, opc, 8);
   13287 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13288 
   13289 	for (i = 0; i < wordcnt; i++) {
   13290 		wm_eeprom_recvbits(sc, &val, 16);
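		/* The first byte received lands in the high bits; swap. */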
   13291 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13292 	}
   13293 
   13294 	/* Raise CS and clear SK. */
   13295 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13296 	CSR_WRITE(sc, WMREG_EECD, reg);
   13297 	CSR_WRITE_FLUSH(sc);
   13298 	delay(2);
   13299 
   13300 out:
   13301 	sc->nvm.release(sc);
   13302 	return rv;
   13303 }
   13304 
/* Using the EERD register */
   13306 
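/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (or EEWR) register until the DONE bit is set or
 *	the attempt limit is reached.
 */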
   13307 static int
   13308 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13309 {
   13310 	uint32_t attempts = 100000;
   13311 	uint32_t i, reg = 0;
   13312 	int32_t done = -1;
   13313 
   13314 	for (i = 0; i < attempts; i++) {
   13315 		reg = CSR_READ(sc, rw);
   13316 
   13317 		if (reg & EERD_DONE) {
   13318 			done = 0;
   13319 			break;
   13320 		}
   13321 		delay(5);
   13322 	}
   13323 
   13324 	return done;
   13325 }
   13326 
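/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM through the EERD register.
 */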
   13327 static int
   13328 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13329 {
   13330 	int i, eerd = 0;
   13331 	int rv = 0;
   13332 
   13333 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13334 		device_xname(sc->sc_dev), __func__));
   13335 
   13336 	if (sc->nvm.acquire(sc) != 0)
   13337 		return -1;
   13338 
   13339 	for (i = 0; i < wordcnt; i++) {
   13340 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13341 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13342 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13343 		if (rv != 0) {
   13344 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13346 			break;
   13347 		}
   13348 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13349 	}
   13350 
   13351 	sc->nvm.release(sc);
   13352 	return rv;
   13353 }
   13354 
   13355 /* Flash */
   13356 
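/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which NVM bank holds a valid signature on ICH8 and newer
 *	devices.  On success, *bank is set to 0 or 1.
 */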
   13357 static int
   13358 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13359 {
   13360 	uint32_t eecd;
   13361 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13362 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13363 	uint32_t nvm_dword = 0;
   13364 	uint8_t sig_byte = 0;
   13365 	int rv;
   13366 
   13367 	switch (sc->sc_type) {
   13368 	case WM_T_PCH_SPT:
   13369 	case WM_T_PCH_CNP:
   13370 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13371 		act_offset = ICH_NVM_SIG_WORD * 2;
   13372 
   13373 		/* Set bank to 0 in case flash read fails. */
   13374 		*bank = 0;
   13375 
   13376 		/* Check bank 0 */
   13377 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13378 		if (rv != 0)
   13379 			return rv;
   13380 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13381 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13382 			*bank = 0;
   13383 			return 0;
   13384 		}
   13385 
   13386 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
   13389 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13390 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13391 			*bank = 1;
   13392 			return 0;
   13393 		}
   13394 		aprint_error_dev(sc->sc_dev,
   13395 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13396 		return -1;
   13397 	case WM_T_ICH8:
   13398 	case WM_T_ICH9:
   13399 		eecd = CSR_READ(sc, WMREG_EECD);
   13400 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13401 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13402 			return 0;
   13403 		}
   13404 		/* FALLTHROUGH */
   13405 	default:
   13406 		/* Default to 0 */
   13407 		*bank = 0;
   13408 
   13409 		/* Check bank 0 */
   13410 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13411 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13412 			*bank = 0;
   13413 			return 0;
   13414 		}
   13415 
   13416 		/* Check bank 1 */
   13417 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13418 		    &sig_byte);
   13419 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13420 			*bank = 1;
   13421 			return 0;
   13422 		}
   13423 	}
   13424 
   13425 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13426 		device_xname(sc->sc_dev)));
   13427 	return -1;
   13428 }
   13429 
   13430 /******************************************************************************
   13431  * This function does initial flash setup so that a new read/write/erase cycle
   13432  * can be started.
   13433  *
   13434  * sc - The pointer to the hw structure
   13435  ****************************************************************************/
   13436 static int32_t
   13437 wm_ich8_cycle_init(struct wm_softc *sc)
   13438 {
   13439 	uint16_t hsfsts;
   13440 	int32_t error = 1;
   13441 	int32_t i     = 0;
   13442 
   13443 	if (sc->sc_type >= WM_T_PCH_SPT)
   13444 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13445 	else
   13446 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13447 
	/* Check the Flash Descriptor Valid bit in the HW status */
   13449 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13450 		return error;
   13451 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   13454 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13455 
   13456 	if (sc->sc_type >= WM_T_PCH_SPT)
   13457 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13458 	else
   13459 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13460 
   13461 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then tell whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore to guard FDONE or the cycle-in-progress bit so that
	 * access by two threads can be serialized, or some other way to
	 * keep two threads from starting a cycle at the same time.
   13470 	 */
   13471 
   13472 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13473 		/*
   13474 		 * There is no cycle running at present, so we can start a
   13475 		 * cycle
   13476 		 */
   13477 
   13478 		/* Begin by setting Flash Cycle Done. */
   13479 		hsfsts |= HSFSTS_DONE;
   13480 		if (sc->sc_type >= WM_T_PCH_SPT)
   13481 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13482 			    hsfsts & 0xffffUL);
   13483 		else
   13484 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13485 		error = 0;
   13486 	} else {
   13487 		/*
		 * Otherwise poll for some time so the current cycle has a
   13489 		 * chance to end before giving up.
   13490 		 */
   13491 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13492 			if (sc->sc_type >= WM_T_PCH_SPT)
   13493 				hsfsts = ICH8_FLASH_READ32(sc,
   13494 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13495 			else
   13496 				hsfsts = ICH8_FLASH_READ16(sc,
   13497 				    ICH_FLASH_HSFSTS);
   13498 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13499 				error = 0;
   13500 				break;
   13501 			}
   13502 			delay(1);
   13503 		}
   13504 		if (error == 0) {
   13505 			/*
			 * The previous cycle ended in time; now set the
			 * Flash Cycle Done.
   13508 			 */
   13509 			hsfsts |= HSFSTS_DONE;
   13510 			if (sc->sc_type >= WM_T_PCH_SPT)
   13511 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13512 				    hsfsts & 0xffffUL);
   13513 			else
   13514 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13515 				    hsfsts);
   13516 		}
   13517 	}
   13518 	return error;
   13519 }
   13520 
   13521 /******************************************************************************
   13522  * This function starts a flash cycle and waits for its completion
   13523  *
   13524  * sc - The pointer to the hw structure
   13525  ****************************************************************************/
   13526 static int32_t
   13527 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13528 {
   13529 	uint16_t hsflctl;
   13530 	uint16_t hsfsts;
   13531 	int32_t error = 1;
   13532 	uint32_t i = 0;
   13533 
   13534 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13535 	if (sc->sc_type >= WM_T_PCH_SPT)
   13536 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13537 	else
   13538 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13539 	hsflctl |= HSFCTL_GO;
   13540 	if (sc->sc_type >= WM_T_PCH_SPT)
   13541 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13542 		    (uint32_t)hsflctl << 16);
   13543 	else
   13544 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13545 
   13546 	/* Wait till FDONE bit is set to 1 */
   13547 	do {
   13548 		if (sc->sc_type >= WM_T_PCH_SPT)
   13549 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13550 			    & 0xffffUL;
   13551 		else
   13552 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13553 		if (hsfsts & HSFSTS_DONE)
   13554 			break;
   13555 		delay(1);
   13556 		i++;
   13557 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13559 		error = 0;
   13560 
   13561 	return error;
   13562 }
   13563 
   13564 /******************************************************************************
   13565  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13566  *
   13567  * sc - The pointer to the hw structure
   13568  * index - The index of the byte or word to read.
   13569  * size - Size of data to read, 1=byte 2=word, 4=dword
   13570  * data - Pointer to the word to store the value read.
   13571  *****************************************************************************/
   13572 static int32_t
   13573 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13574     uint32_t size, uint32_t *data)
   13575 {
   13576 	uint16_t hsfsts;
   13577 	uint16_t hsflctl;
   13578 	uint32_t flash_linear_address;
   13579 	uint32_t flash_data = 0;
   13580 	int32_t error = 1;
   13581 	int32_t count = 0;
   13582 
	if (size < 1 || size > 4 || data == NULL ||
   13584 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13585 		return error;
   13586 
   13587 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13588 	    sc->sc_ich8_flash_base;
   13589 
   13590 	do {
   13591 		delay(1);
   13592 		/* Steps */
   13593 		error = wm_ich8_cycle_init(sc);
   13594 		if (error)
   13595 			break;
   13596 
   13597 		if (sc->sc_type >= WM_T_PCH_SPT)
   13598 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13599 			    >> 16;
   13600 		else
   13601 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0/1/3 = 1/2/4 bytes) */
   13603 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13604 		    & HSFCTL_BCOUNT_MASK;
   13605 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13606 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13607 			/*
			 * On SPT, this register is in LAN memory space,
			 * not flash.  Only 32 bit access is supported.
   13610 			 */
   13611 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13612 			    (uint32_t)hsflctl << 16);
   13613 		} else
   13614 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13615 
   13616 		/*
   13617 		 * Write the last 24 bits of index into Flash Linear address
   13618 		 * field in Flash Address
   13619 		 */
   13620 		/* TODO: TBD maybe check the index against the size of flash */
   13621 
   13622 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13623 
   13624 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13625 
   13626 		/*
		 * Check if FCERR is set to 1.  If so, clear it and retry
		 * the whole sequence a few more times; otherwise read in
		 * the Flash Data0 register, least significant byte first.
   13631 		 */
   13632 		if (error == 0) {
   13633 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13634 			if (size == 1)
   13635 				*data = (uint8_t)(flash_data & 0x000000FF);
   13636 			else if (size == 2)
   13637 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13638 			else if (size == 4)
   13639 				*data = (uint32_t)flash_data;
   13640 			break;
   13641 		} else {
   13642 			/*
   13643 			 * If we've gotten here, then things are probably
   13644 			 * completely hosed, but if the error condition is
   13645 			 * detected, it won't hurt to give it another try...
   13646 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13647 			 */
   13648 			if (sc->sc_type >= WM_T_PCH_SPT)
   13649 				hsfsts = ICH8_FLASH_READ32(sc,
   13650 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13651 			else
   13652 				hsfsts = ICH8_FLASH_READ16(sc,
   13653 				    ICH_FLASH_HSFSTS);
   13654 
   13655 			if (hsfsts & HSFSTS_ERR) {
   13656 				/* Repeat for some time before giving up. */
   13657 				continue;
   13658 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13659 				break;
   13660 		}
   13661 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13662 
   13663 	return error;
   13664 }
   13665 
   13666 /******************************************************************************
   13667  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13668  *
   13669  * sc - pointer to wm_hw structure
   13670  * index - The index of the byte to read.
   13671  * data - Pointer to a byte to store the value read.
   13672  *****************************************************************************/
   13673 static int32_t
   13674 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13675 {
   13676 	int32_t status;
   13677 	uint32_t word = 0;
   13678 
   13679 	status = wm_read_ich8_data(sc, index, 1, &word);
   13680 	if (status == 0)
   13681 		*data = (uint8_t)word;
   13682 	else
   13683 		*data = 0;
   13684 
   13685 	return status;
   13686 }
   13687 
   13688 /******************************************************************************
   13689  * Reads a word from the NVM using the ICH8 flash access registers.
   13690  *
   13691  * sc - pointer to wm_hw structure
   13692  * index - The starting byte index of the word to read.
   13693  * data - Pointer to a word to store the value read.
   13694  *****************************************************************************/
   13695 static int32_t
   13696 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13697 {
   13698 	int32_t status;
   13699 	uint32_t word = 0;
   13700 
   13701 	status = wm_read_ich8_data(sc, index, 2, &word);
   13702 	if (status == 0)
   13703 		*data = (uint16_t)word;
   13704 	else
   13705 		*data = 0;
   13706 
   13707 	return status;
   13708 }
   13709 
   13710 /******************************************************************************
   13711  * Reads a dword from the NVM using the ICH8 flash access registers.
   13712  *
   13713  * sc - pointer to wm_hw structure
   13714  * index - The starting byte index of the word to read.
   13715  * data - Pointer to a word to store the value read.
   13716  *****************************************************************************/
   13717 static int32_t
   13718 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13719 {
   13720 	int32_t status;
   13721 
   13722 	status = wm_read_ich8_data(sc, index, 4, data);
   13723 	return status;
   13724 }
   13725 
   13726 /******************************************************************************
   13727  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13728  * register.
   13729  *
   13730  * sc - Struct containing variables accessed by shared code
   13731  * offset - offset of word in the EEPROM to read
   13732  * data - word read from the EEPROM
   13733  * words - number of words to read
   13734  *****************************************************************************/
   13735 static int
   13736 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13737 {
   13738 	int32_t	 rv = 0;
   13739 	uint32_t flash_bank = 0;
   13740 	uint32_t act_offset = 0;
   13741 	uint32_t bank_offset = 0;
   13742 	uint16_t word = 0;
   13743 	uint16_t i = 0;
   13744 
   13745 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13746 		device_xname(sc->sc_dev), __func__));
   13747 
   13748 	if (sc->nvm.acquire(sc) != 0)
   13749 		return -1;
   13750 
   13751 	/*
   13752 	 * We need to know which is the valid flash bank.  In the event
   13753 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13754 	 * managing flash_bank. So it cannot be trusted and needs
   13755 	 * to be updated with each read.
   13756 	 */
   13757 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13758 	if (rv) {
   13759 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13760 			device_xname(sc->sc_dev)));
   13761 		flash_bank = 0;
   13762 	}
   13763 
   13764 	/*
   13765 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13766 	 * size
   13767 	 */
   13768 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13769 
   13770 	for (i = 0; i < words; i++) {
   13771 		/* The NVM part needs a byte offset, hence * 2 */
   13772 		act_offset = bank_offset + ((offset + i) * 2);
   13773 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13774 		if (rv) {
   13775 			aprint_error_dev(sc->sc_dev,
   13776 			    "%s: failed to read NVM\n", __func__);
   13777 			break;
   13778 		}
   13779 		data[i] = word;
   13780 	}
   13781 
   13782 	sc->nvm.release(sc);
   13783 	return rv;
   13784 }
   13785 
   13786 /******************************************************************************
   13787  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13788  * register.
   13789  *
   13790  * sc - Struct containing variables accessed by shared code
   13791  * offset - offset of word in the EEPROM to read
   13792  * data - word read from the EEPROM
   13793  * words - number of words to read
   13794  *****************************************************************************/
   13795 static int
   13796 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13797 {
   13798 	int32_t	 rv = 0;
   13799 	uint32_t flash_bank = 0;
   13800 	uint32_t act_offset = 0;
   13801 	uint32_t bank_offset = 0;
   13802 	uint32_t dword = 0;
   13803 	uint16_t i = 0;
   13804 
   13805 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13806 		device_xname(sc->sc_dev), __func__));
   13807 
   13808 	if (sc->nvm.acquire(sc) != 0)
   13809 		return -1;
   13810 
   13811 	/*
   13812 	 * We need to know which is the valid flash bank.  In the event
   13813 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13814 	 * managing flash_bank. So it cannot be trusted and needs
   13815 	 * to be updated with each read.
   13816 	 */
   13817 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13818 	if (rv) {
   13819 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13820 			device_xname(sc->sc_dev)));
   13821 		flash_bank = 0;
   13822 	}
   13823 
   13824 	/*
   13825 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13826 	 * size
   13827 	 */
   13828 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13829 
   13830 	for (i = 0; i < words; i++) {
   13831 		/* The NVM part needs a byte offset, hence * 2 */
   13832 		act_offset = bank_offset + ((offset + i) * 2);
   13833 		/* but we must read dword aligned, so mask ... */
   13834 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13835 		if (rv) {
   13836 			aprint_error_dev(sc->sc_dev,
   13837 			    "%s: failed to read NVM\n", __func__);
   13838 			break;
   13839 		}
   13840 		/* ... and pick out low or high word */
   13841 		if ((act_offset & 0x2) == 0)
   13842 			data[i] = (uint16_t)(dword & 0xFFFF);
   13843 		else
   13844 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13845 	}
   13846 
   13847 	sc->nvm.release(sc);
   13848 	return rv;
   13849 }
   13850 
   13851 /* iNVM */
   13852 
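/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM records for a word autoload record that matches
 *	the given word address.
 */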
   13853 static int
   13854 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13855 {
	int32_t	 rv = -1;	/* -1 = word not found */
   13857 	uint32_t invm_dword;
   13858 	uint16_t i;
   13859 	uint8_t record_type, word_address;
   13860 
   13861 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13862 		device_xname(sc->sc_dev), __func__));
   13863 
   13864 	for (i = 0; i < INVM_SIZE; i++) {
   13865 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13866 		/* Get record type */
   13867 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13868 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13869 			break;
   13870 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13871 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13872 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13873 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13874 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13875 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13876 			if (word_address == address) {
   13877 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13878 				rv = 0;
   13879 				break;
   13880 			}
   13881 		}
   13882 	}
   13883 
   13884 	return rv;
   13885 }
   13886 
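/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, substituting documented default values
 *	for words that are not mapped (I211).
 */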
   13887 static int
   13888 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13889 {
   13890 	int rv = 0;
   13891 	int i;
   13892 
   13893 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13894 		device_xname(sc->sc_dev), __func__));
   13895 
   13896 	if (sc->nvm.acquire(sc) != 0)
   13897 		return -1;
   13898 
   13899 	for (i = 0; i < words; i++) {
   13900 		switch (offset + i) {
   13901 		case NVM_OFF_MACADDR:
   13902 		case NVM_OFF_MACADDR1:
   13903 		case NVM_OFF_MACADDR2:
   13904 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13905 			if (rv != 0) {
   13906 				data[i] = 0xffff;
   13907 				rv = -1;
   13908 			}
   13909 			break;
   13910 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13911 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13912 			if (rv != 0) {
   13913 				*data = INVM_DEFAULT_AL;
   13914 				rv = 0;
   13915 			}
   13916 			break;
   13917 		case NVM_OFF_CFG2:
   13918 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13919 			if (rv != 0) {
   13920 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13921 				rv = 0;
   13922 			}
   13923 			break;
   13924 		case NVM_OFF_CFG4:
   13925 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13926 			if (rv != 0) {
   13927 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13928 				rv = 0;
   13929 			}
   13930 			break;
   13931 		case NVM_OFF_LED_1_CFG:
   13932 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13933 			if (rv != 0) {
   13934 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13935 				rv = 0;
   13936 			}
   13937 			break;
   13938 		case NVM_OFF_LED_0_2_CFG:
   13939 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13940 			if (rv != 0) {
   13941 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13942 				rv = 0;
   13943 			}
   13944 			break;
   13945 		case NVM_OFF_ID_LED_SETTINGS:
   13946 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13947 			if (rv != 0) {
   13948 				*data = ID_LED_RESERVED_FFFF;
   13949 				rv = 0;
   13950 			}
   13951 			break;
   13952 		default:
   13953 			DPRINTF(sc, WM_DEBUG_NVM,
   13954 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13955 			*data = NVM_RESERVED_WORD;
   13956 			break;
   13957 		}
   13958 	}
   13959 
   13960 	sc->nvm.release(sc);
   13961 	return rv;
   13962 }
   13963 
   13964 /* Lock, detecting NVM type, validate checksum, version and read */
   13965 
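/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash
 *	(only possible on 82573/82574/82583).
 */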
   13966 static int
   13967 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13968 {
   13969 	uint32_t eecd = 0;
   13970 
   13971 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13972 	    || sc->sc_type == WM_T_82583) {
   13973 		eecd = CSR_READ(sc, WMREG_EECD);
   13974 
   13975 		/* Isolate bits 15 & 16 */
   13976 		eecd = ((eecd >> 15) & 0x03);
   13977 
   13978 		/* If both bits are set, device is Flash type */
   13979 		if (eecd == 0x03)
   13980 			return 0;
   13981 	}
   13982 	return 1;
   13983 }
   13984 
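/*
 * wm_nvm_flash_presence_i210:
 *
 *	Return 1 if an external Flash is detected on I210/I211.
 */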
   13985 static int
   13986 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13987 {
   13988 	uint32_t eec;
   13989 
   13990 	eec = CSR_READ(sc, WMREG_EEC);
   13991 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13992 		return 1;
   13993 
   13994 	return 0;
   13995 }
   13996 
   13997 /*
   13998  * wm_nvm_validate_checksum
   13999  *
 * The sum of the first 64 16-bit words must equal NVM_CHECKSUM.
   14001  */
   14002 static int
   14003 wm_nvm_validate_checksum(struct wm_softc *sc)
   14004 {
   14005 	uint16_t checksum;
   14006 	uint16_t eeprom_data;
   14007 #ifdef WM_DEBUG
   14008 	uint16_t csum_wordaddr, valid_checksum;
   14009 #endif
   14010 	int i;
   14011 
   14012 	checksum = 0;
   14013 
   14014 	/* Don't check for I211 */
   14015 	if (sc->sc_type == WM_T_I211)
   14016 		return 0;
   14017 
   14018 #ifdef WM_DEBUG
   14019 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14020 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14021 		csum_wordaddr = NVM_OFF_COMPAT;
   14022 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14023 	} else {
   14024 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14025 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14026 	}
   14027 
   14028 	/* Dump EEPROM image for debug */
   14029 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14030 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14031 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14032 		/* XXX PCH_SPT? */
   14033 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14034 		if ((eeprom_data & valid_checksum) == 0)
   14035 			DPRINTF(sc, WM_DEBUG_NVM,
   14036 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14037 				device_xname(sc->sc_dev), eeprom_data,
   14038 				    valid_checksum));
   14039 	}
   14040 
   14041 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14042 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14043 		for (i = 0; i < NVM_SIZE; i++) {
   14044 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14045 				printf("XXXX ");
   14046 			else
   14047 				printf("%04hx ", eeprom_data);
   14048 			if (i % 8 == 7)
   14049 				printf("\n");
   14050 		}
   14051 	}
   14052 
   14053 #endif /* WM_DEBUG */
   14054 
   14055 	for (i = 0; i < NVM_SIZE; i++) {
   14056 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14057 			return 1;
   14058 		checksum += eeprom_data;
   14059 	}
   14060 
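	/* A mismatch is reported only under WM_DEBUG; it is not fatal. */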
   14061 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14062 #ifdef WM_DEBUG
   14063 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14064 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14065 #endif
   14066 	}
   14067 
   14068 	return 0;
   14069 }
   14070 
   14071 static void
   14072 wm_nvm_version_invm(struct wm_softc *sc)
   14073 {
   14074 	uint32_t dword;
   14075 
   14076 	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and just use word 61 as the
	 * document says.  Perhaps it's not perfect though...
   14080 	 *
   14081 	 * Example:
   14082 	 *
   14083 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14084 	 */
   14085 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14086 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14087 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14088 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14089 }
   14090 
   14091 static void
   14092 wm_nvm_version(struct wm_softc *sc)
   14093 {
   14094 	uint16_t major, minor, build, patch;
   14095 	uint16_t uid0, uid1;
   14096 	uint16_t nvm_data;
   14097 	uint16_t off;
   14098 	bool check_version = false;
   14099 	bool check_optionrom = false;
   14100 	bool have_build = false;
   14101 	bool have_uid = true;
   14102 
   14103 	/*
   14104 	 * Version format:
   14105 	 *
   14106 	 * XYYZ
   14107 	 * X0YZ
   14108 	 * X0YY
   14109 	 *
   14110 	 * Example:
   14111 	 *
   14112 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14113 	 *	82571	0x50a6	5.10.6?
   14114 	 *	82572	0x506a	5.6.10?
   14115 	 *	82572EI	0x5069	5.6.9?
   14116 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14117 	 *		0x2013	2.1.3?
   14118 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14119 	 * ICH8+82567	0x0040	0.4.0?
   14120 	 * ICH9+82566	0x1040	1.4.0?
   14121 	 *ICH10+82567	0x0043	0.4.3?
   14122 	 *  PCH+82577	0x00c1	0.12.1?
   14123 	 * PCH2+82579	0x00d3	0.13.3?
   14124 	 *		0x00d4	0.13.4?
   14125 	 *  LPT+I218	0x0023	0.2.3?
   14126 	 *  SPT+I219	0x0084	0.8.4?
   14127 	 *  CNP+I219	0x0054	0.5.4?
   14128 	 */
   14129 
   14130 	/*
   14131 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14134 	 */
   14135 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14136 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14137 		have_uid = false;
   14138 
   14139 	switch (sc->sc_type) {
   14140 	case WM_T_82571:
   14141 	case WM_T_82572:
   14142 	case WM_T_82574:
   14143 	case WM_T_82583:
   14144 		check_version = true;
   14145 		check_optionrom = true;
   14146 		have_build = true;
   14147 		break;
   14148 	case WM_T_ICH8:
   14149 	case WM_T_ICH9:
   14150 	case WM_T_ICH10:
   14151 	case WM_T_PCH:
   14152 	case WM_T_PCH2:
   14153 	case WM_T_PCH_LPT:
   14154 	case WM_T_PCH_SPT:
   14155 	case WM_T_PCH_CNP:
   14156 		check_version = true;
   14157 		have_build = true;
   14158 		have_uid = false;
   14159 		break;
   14160 	case WM_T_82575:
   14161 	case WM_T_82576:
   14162 	case WM_T_82580:
   14163 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14164 			check_version = true;
   14165 		break;
   14166 	case WM_T_I211:
   14167 		wm_nvm_version_invm(sc);
   14168 		have_uid = false;
   14169 		goto printver;
   14170 	case WM_T_I210:
   14171 		if (!wm_nvm_flash_presence_i210(sc)) {
   14172 			wm_nvm_version_invm(sc);
   14173 			have_uid = false;
   14174 			goto printver;
   14175 		}
   14176 		/* FALLTHROUGH */
   14177 	case WM_T_I350:
   14178 	case WM_T_I354:
   14179 		check_version = true;
   14180 		check_optionrom = true;
   14181 		break;
   14182 	default:
   14183 		return;
   14184 	}
   14185 	if (check_version
   14186 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14187 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14188 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14189 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14190 			build = nvm_data & NVM_BUILD_MASK;
   14191 			have_build = true;
   14192 		} else
   14193 			minor = nvm_data & 0x00ff;
   14194 
   14195 		/* Decimal */
   14196 		minor = (minor / 16) * 10 + (minor % 16);
   14197 		sc->sc_nvm_ver_major = major;
   14198 		sc->sc_nvm_ver_minor = minor;
   14199 
   14200 printver:
   14201 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14202 		    sc->sc_nvm_ver_minor);
   14203 		if (have_build) {
   14204 			sc->sc_nvm_ver_build = build;
   14205 			aprint_verbose(".%d", build);
   14206 		}
   14207 	}
   14208 
	/* Assume the Option ROM area is above NVM_SIZE */
   14210 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14211 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14212 		/* Option ROM Version */
   14213 		if ((off != 0x0000) && (off != 0xffff)) {
   14214 			int rv;
   14215 
   14216 			off += NVM_COMBO_VER_OFF;
   14217 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14218 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14219 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14220 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14221 				/* 16bits */
   14222 				major = uid0 >> 8;
   14223 				build = (uid0 << 8) | (uid1 >> 8);
   14224 				patch = uid1 & 0x00ff;
   14225 				aprint_verbose(", option ROM Version %d.%d.%d",
   14226 				    major, build, patch);
   14227 			}
   14228 		}
   14229 	}
   14230 
   14231 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14232 		aprint_verbose(", Image Unique ID %08x",
   14233 		    ((uint32_t)uid1 << 16) | uid0);
   14234 }
   14235 
   14236 /*
   14237  * wm_nvm_read:
   14238  *
   14239  *	Read data from the serial EEPROM.
   14240  */
   14241 static int
   14242 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14243 {
   14244 	int rv;
   14245 
   14246 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14247 		device_xname(sc->sc_dev), __func__));
   14248 
   14249 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14250 		return -1;
   14251 
   14252 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14253 
   14254 	return rv;
   14255 }
   14256 
   14257 /*
   14258  * Hardware semaphores.
 * Very complex...
   14260  */
   14261 
   14262 static int
   14263 wm_get_null(struct wm_softc *sc)
   14264 {
   14265 
   14266 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14267 		device_xname(sc->sc_dev), __func__));
   14268 	return 0;
   14269 }
   14270 
   14271 static void
   14272 wm_put_null(struct wm_softc *sc)
   14273 {
   14274 
   14275 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14276 		device_xname(sc->sc_dev), __func__));
   14277 	return;
   14278 }
   14279 
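/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access through the EECD register and wait
 *	for the grant.
 */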
   14280 static int
   14281 wm_get_eecd(struct wm_softc *sc)
   14282 {
   14283 	uint32_t reg;
   14284 	int x;
   14285 
   14286 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14287 		device_xname(sc->sc_dev), __func__));
   14288 
   14289 	reg = CSR_READ(sc, WMREG_EECD);
   14290 
   14291 	/* Request EEPROM access. */
   14292 	reg |= EECD_EE_REQ;
   14293 	CSR_WRITE(sc, WMREG_EECD, reg);
   14294 
   14295 	/* ..and wait for it to be granted. */
   14296 	for (x = 0; x < 1000; x++) {
   14297 		reg = CSR_READ(sc, WMREG_EECD);
   14298 		if (reg & EECD_EE_GNT)
   14299 			break;
   14300 		delay(5);
   14301 	}
   14302 	if ((reg & EECD_EE_GNT) == 0) {
   14303 		aprint_error_dev(sc->sc_dev,
   14304 		    "could not acquire EEPROM GNT\n");
   14305 		reg &= ~EECD_EE_REQ;
   14306 		CSR_WRITE(sc, WMREG_EECD, reg);
   14307 		return -1;
   14308 	}
   14309 
   14310 	return 0;
   14311 }
   14312 
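/*
 * wm_nvm_eec_clock_raise:
 *
 *	Raise the EEPROM clock (SK) and wait the required delay
 *	(1us for SPI, 50us for Microwire).
 */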
   14313 static void
   14314 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14315 {
   14316 
   14317 	*eecd |= EECD_SK;
   14318 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14319 	CSR_WRITE_FLUSH(sc);
   14320 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14321 		delay(1);
   14322 	else
   14323 		delay(50);
   14324 }
   14325 
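/*
 * wm_nvm_eec_clock_lower:
 *
 *	Lower the EEPROM clock (SK) and wait the required delay
 *	(1us for SPI, 50us for Microwire).
 */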
   14326 static void
   14327 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14328 {
   14329 
   14330 	*eecd &= ~EECD_SK;
   14331 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14332 	CSR_WRITE_FLUSH(sc);
   14333 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14334 		delay(1);
   14335 	else
   14336 		delay(50);
   14337 }
   14338 
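/*
 * wm_put_eecd:
 *
 *	Stop the EEPROM and release the direct access request.
 */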
   14339 static void
   14340 wm_put_eecd(struct wm_softc *sc)
   14341 {
   14342 	uint32_t reg;
   14343 
   14344 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14345 		device_xname(sc->sc_dev), __func__));
   14346 
   14347 	/* Stop nvm */
   14348 	reg = CSR_READ(sc, WMREG_EECD);
   14349 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14350 		/* Pull CS high */
   14351 		reg |= EECD_CS;
   14352 		wm_nvm_eec_clock_lower(sc, &reg);
   14353 	} else {
   14354 		/* CS on Microwire is active-high */
   14355 		reg &= ~(EECD_CS | EECD_DI);
   14356 		CSR_WRITE(sc, WMREG_EECD, reg);
   14357 		wm_nvm_eec_clock_raise(sc, &reg);
   14358 		wm_nvm_eec_clock_lower(sc, &reg);
   14359 	}
   14360 
   14361 	reg = CSR_READ(sc, WMREG_EECD);
   14362 	reg &= ~EECD_EE_REQ;
   14363 	CSR_WRITE(sc, WMREG_EECD, reg);
   14364 
   14365 	return;
   14366 }
   14367 
   14368 /*
   14369  * Get hardware semaphore.
   14370  * Same as e1000_get_hw_semaphore_generic()
   14371  */
   14372 static int
   14373 wm_get_swsm_semaphore(struct wm_softc *sc)
   14374 {
   14375 	int32_t timeout;
   14376 	uint32_t swsm;
   14377 
   14378 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14379 		device_xname(sc->sc_dev), __func__));
   14380 	KASSERT(sc->sc_nvm_wordsize > 0);
   14381 
   14382 retry:
   14383 	/* Get the SW semaphore. */
   14384 	timeout = sc->sc_nvm_wordsize + 1;
   14385 	while (timeout) {
   14386 		swsm = CSR_READ(sc, WMREG_SWSM);
   14387 
   14388 		if ((swsm & SWSM_SMBI) == 0)
   14389 			break;
   14390 
   14391 		delay(50);
   14392 		timeout--;
   14393 	}
   14394 
   14395 	if (timeout == 0) {
   14396 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14397 			/*
   14398 			 * In rare circumstances, the SW semaphore may already
   14399 			 * be held unintentionally. Clear the semaphore once
   14400 			 * before giving up.
   14401 			 */
   14402 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14403 			wm_put_swsm_semaphore(sc);
   14404 			goto retry;
   14405 		}
   14406 		aprint_error_dev(sc->sc_dev,
   14407 		    "could not acquire SWSM SMBI\n");
   14408 		return 1;
   14409 	}
   14410 
   14411 	/* Get the FW semaphore. */
   14412 	timeout = sc->sc_nvm_wordsize + 1;
   14413 	while (timeout) {
   14414 		swsm = CSR_READ(sc, WMREG_SWSM);
   14415 		swsm |= SWSM_SWESMBI;
   14416 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14417 		/* If we managed to set the bit we got the semaphore. */
   14418 		swsm = CSR_READ(sc, WMREG_SWSM);
   14419 		if (swsm & SWSM_SWESMBI)
   14420 			break;
   14421 
   14422 		delay(50);
   14423 		timeout--;
   14424 	}
   14425 
   14426 	if (timeout == 0) {
   14427 		aprint_error_dev(sc->sc_dev,
   14428 		    "could not acquire SWSM SWESMBI\n");
   14429 		/* Release semaphores */
   14430 		wm_put_swsm_semaphore(sc);
   14431 		return 1;
   14432 	}
   14433 	return 0;
   14434 }
   14435 
   14436 /*
   14437  * Put hardware semaphore.
   14438  * Same as e1000_put_hw_semaphore_generic()
   14439  */
   14440 static void
   14441 wm_put_swsm_semaphore(struct wm_softc *sc)
   14442 {
   14443 	uint32_t swsm;
   14444 
   14445 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14446 		device_xname(sc->sc_dev), __func__));
   14447 
   14448 	swsm = CSR_READ(sc, WMREG_SWSM);
   14449 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14450 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14451 }
   14452 
   14453 /*
   14454  * Get SW/FW semaphore.
   14455  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14456  */
   14457 static int
   14458 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14459 {
   14460 	uint32_t swfw_sync;
   14461 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14462 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14463 	int timeout;
   14464 
   14465 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14466 		device_xname(sc->sc_dev), __func__));
   14467 
   14468 	if (sc->sc_type == WM_T_80003)
   14469 		timeout = 50;
   14470 	else
   14471 		timeout = 200;
   14472 
   14473 	while (timeout) {
   14474 		if (wm_get_swsm_semaphore(sc)) {
   14475 			aprint_error_dev(sc->sc_dev,
   14476 			    "%s: failed to get semaphore\n",
   14477 			    __func__);
   14478 			return 1;
   14479 		}
   14480 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14481 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14482 			swfw_sync |= swmask;
   14483 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14484 			wm_put_swsm_semaphore(sc);
   14485 			return 0;
   14486 		}
   14487 		wm_put_swsm_semaphore(sc);
   14488 		delay(5000);
   14489 		timeout--;
   14490 	}
   14491 	device_printf(sc->sc_dev,
   14492 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14493 	    mask, swfw_sync);
   14494 	return 1;
   14495 }
   14496 
   14497 static void
   14498 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14499 {
   14500 	uint32_t swfw_sync;
   14501 
   14502 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14503 		device_xname(sc->sc_dev), __func__));
   14504 
   14505 	while (wm_get_swsm_semaphore(sc) != 0)
   14506 		continue;
   14507 
   14508 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14509 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14510 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14511 
   14512 	wm_put_swsm_semaphore(sc);
   14513 }
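
/*
 * Usage sketch (illustrative only): the mask names the resource being
 * claimed; wm_get_swfw_semaphore() shifts it into both the SOFT and
 * FIRM fields of SW_FW_SYNC and takes the SOFT bit only when neither
 * side currently holds the resource.  NVM access, for example, is
 * bracketed as:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return 1;
 *	// ... access the NVM ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */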
   14514 
   14515 static int
   14516 wm_get_nvm_80003(struct wm_softc *sc)
   14517 {
   14518 	int rv;
   14519 
   14520 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14521 		device_xname(sc->sc_dev), __func__));
   14522 
   14523 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14524 		aprint_error_dev(sc->sc_dev,
   14525 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14526 		return rv;
   14527 	}
   14528 
   14529 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14530 	    && (rv = wm_get_eecd(sc)) != 0) {
   14531 		aprint_error_dev(sc->sc_dev,
   14532 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14533 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14534 		return rv;
   14535 	}
   14536 
   14537 	return 0;
   14538 }
   14539 
   14540 static void
   14541 wm_put_nvm_80003(struct wm_softc *sc)
   14542 {
   14543 
   14544 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14545 		device_xname(sc->sc_dev), __func__));
   14546 
   14547 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14548 		wm_put_eecd(sc);
   14549 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14550 }
   14551 
   14552 static int
   14553 wm_get_nvm_82571(struct wm_softc *sc)
   14554 {
   14555 	int rv;
   14556 
   14557 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14558 		device_xname(sc->sc_dev), __func__));
   14559 
   14560 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14561 		return rv;
   14562 
   14563 	switch (sc->sc_type) {
   14564 	case WM_T_82573:
   14565 		break;
   14566 	default:
   14567 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14568 			rv = wm_get_eecd(sc);
   14569 		break;
   14570 	}
   14571 
   14572 	if (rv != 0) {
   14573 		aprint_error_dev(sc->sc_dev,
   14574 		    "%s: failed to get semaphore\n",
   14575 		    __func__);
   14576 		wm_put_swsm_semaphore(sc);
   14577 	}
   14578 
   14579 	return rv;
   14580 }
   14581 
   14582 static void
   14583 wm_put_nvm_82571(struct wm_softc *sc)
   14584 {
   14585 
   14586 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14587 		device_xname(sc->sc_dev), __func__));
   14588 
   14589 	switch (sc->sc_type) {
   14590 	case WM_T_82573:
   14591 		break;
   14592 	default:
   14593 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14594 			wm_put_eecd(sc);
   14595 		break;
   14596 	}
   14597 
   14598 	wm_put_swsm_semaphore(sc);
   14599 }
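
/*
 * Note on the switch above: the 82573 is the only type here that skips
 * the EECD handshake, so for it these helpers reduce to a plain
 * wm_get_swsm_semaphore()/wm_put_swsm_semaphore() pair.
 */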
   14600 
   14601 static int
   14602 wm_get_phy_82575(struct wm_softc *sc)
   14603 {
   14604 
   14605 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14606 		device_xname(sc->sc_dev), __func__));
   14607 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14608 }
   14609 
   14610 static void
   14611 wm_put_phy_82575(struct wm_softc *sc)
   14612 {
   14613 
   14614 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14615 		device_xname(sc->sc_dev), __func__));
   14616 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14617 }
   14618 
   14619 static int
   14620 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14621 {
   14622 	uint32_t ext_ctrl;
   14623 	int timeout = 200;
   14624 
   14625 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14626 		device_xname(sc->sc_dev), __func__));
   14627 
   14628 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14629 	for (timeout = 0; timeout < 200; timeout++) {
   14630 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14631 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14632 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14633 
   14634 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14635 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14636 			return 0;
   14637 		delay(5000);
   14638 	}
   14639 	device_printf(sc->sc_dev,
   14640 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14641 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14642 	return 1;
   14643 }
   14644 
   14645 static void
   14646 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14647 {
   14648 	uint32_t ext_ctrl;
   14649 
   14650 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14651 		device_xname(sc->sc_dev), __func__));
   14652 
   14653 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14654 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14655 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14656 
   14657 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14658 }
   14659 
   14660 static int
   14661 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14662 {
   14663 	uint32_t ext_ctrl;
   14664 	int timeout;
   14665 
   14666 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14667 		device_xname(sc->sc_dev), __func__));
   14668 	mutex_enter(sc->sc_ich_phymtx);
   14669 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14670 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14671 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14672 			break;
   14673 		delay(1000);
   14674 	}
   14675 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14676 		device_printf(sc->sc_dev,
   14677 		    "SW has already locked the resource\n");
   14678 		goto out;
   14679 	}
   14680 
   14681 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14682 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14683 	for (timeout = 0; timeout < 1000; timeout++) {
   14684 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14685 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14686 			break;
   14687 		delay(1000);
   14688 	}
   14689 	if (timeout >= 1000) {
   14690 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14691 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14692 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14693 		goto out;
   14694 	}
   14695 	return 0;
   14696 
   14697 out:
   14698 	mutex_exit(sc->sc_ich_phymtx);
   14699 	return 1;
   14700 }
   14701 
   14702 static void
   14703 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14704 {
   14705 	uint32_t ext_ctrl;
   14706 
   14707 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14708 		device_xname(sc->sc_dev), __func__));
   14709 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14710 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14711 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14712 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14713 	} else {
   14714 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14715 	}
   14716 
   14717 	mutex_exit(sc->sc_ich_phymtx);
   14718 }
   14719 
   14720 static int
   14721 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14722 {
   14723 
   14724 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14725 		device_xname(sc->sc_dev), __func__));
   14726 	mutex_enter(sc->sc_ich_nvmmtx);
   14727 
   14728 	return 0;
   14729 }
   14730 
   14731 static void
   14732 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14733 {
   14734 
   14735 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14736 		device_xname(sc->sc_dev), __func__));
   14737 	mutex_exit(sc->sc_ich_nvmmtx);
   14738 }
   14739 
   14740 static int
   14741 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14742 {
   14743 	int i = 0;
   14744 	uint32_t reg;
   14745 
   14746 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14747 		device_xname(sc->sc_dev), __func__));
   14748 
   14749 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14750 	do {
   14751 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14752 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14753 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14754 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14755 			break;
   14756 		delay(2*1000);
   14757 		i++;
   14758 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14759 
   14760 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14761 		wm_put_hw_semaphore_82573(sc);
   14762 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14763 		    device_xname(sc->sc_dev));
   14764 		return -1;
   14765 	}
   14766 
   14767 	return 0;
   14768 }
   14769 
   14770 static void
   14771 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14772 {
   14773 	uint32_t reg;
   14774 
   14775 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14776 		device_xname(sc->sc_dev), __func__));
   14777 
   14778 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14779 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14780 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14781 }
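
/*
 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit acts as a test-and-check lock: a
 * write of the bit only sticks while firmware does not own the
 * resource, so wm_get_hw_semaphore_82573() re-reads the register after
 * each write and retries (up to WM_MDIO_OWNERSHIP_TIMEOUT times) until
 * the bit reads back as set.
 */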
   14782 
   14783 /*
   14784  * Management mode and power management related subroutines.
   14785  * BMC, AMT, suspend/resume and EEE.
   14786  */
   14787 
   14788 #ifdef WM_WOL
   14789 static int
   14790 wm_check_mng_mode(struct wm_softc *sc)
   14791 {
   14792 	int rv;
   14793 
   14794 	switch (sc->sc_type) {
   14795 	case WM_T_ICH8:
   14796 	case WM_T_ICH9:
   14797 	case WM_T_ICH10:
   14798 	case WM_T_PCH:
   14799 	case WM_T_PCH2:
   14800 	case WM_T_PCH_LPT:
   14801 	case WM_T_PCH_SPT:
   14802 	case WM_T_PCH_CNP:
   14803 		rv = wm_check_mng_mode_ich8lan(sc);
   14804 		break;
   14805 	case WM_T_82574:
   14806 	case WM_T_82583:
   14807 		rv = wm_check_mng_mode_82574(sc);
   14808 		break;
   14809 	case WM_T_82571:
   14810 	case WM_T_82572:
   14811 	case WM_T_82573:
   14812 	case WM_T_80003:
   14813 		rv = wm_check_mng_mode_generic(sc);
   14814 		break;
   14815 	default:
    14816 		/* Nothing to do */
   14817 		rv = 0;
   14818 		break;
   14819 	}
   14820 
   14821 	return rv;
   14822 }
   14823 
   14824 static int
   14825 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14826 {
   14827 	uint32_t fwsm;
   14828 
   14829 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14830 
   14831 	if (((fwsm & FWSM_FW_VALID) != 0)
   14832 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14833 		return 1;
   14834 
   14835 	return 0;
   14836 }
   14837 
   14838 static int
   14839 wm_check_mng_mode_82574(struct wm_softc *sc)
   14840 {
   14841 	uint16_t data;
   14842 
   14843 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14844 
   14845 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14846 		return 1;
   14847 
   14848 	return 0;
   14849 }
   14850 
   14851 static int
   14852 wm_check_mng_mode_generic(struct wm_softc *sc)
   14853 {
   14854 	uint32_t fwsm;
   14855 
   14856 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14857 
   14858 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14859 		return 1;
   14860 
   14861 	return 0;
   14862 }
   14863 #endif /* WM_WOL */
   14864 
   14865 static int
   14866 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14867 {
   14868 	uint32_t manc, fwsm, factps;
   14869 
   14870 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14871 		return 0;
   14872 
   14873 	manc = CSR_READ(sc, WMREG_MANC);
   14874 
   14875 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14876 		device_xname(sc->sc_dev), manc));
   14877 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14878 		return 0;
   14879 
   14880 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14881 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14882 		factps = CSR_READ(sc, WMREG_FACTPS);
   14883 		if (((factps & FACTPS_MNGCG) == 0)
   14884 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14885 			return 1;
   14886 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14887 		uint16_t data;
   14888 
   14889 		factps = CSR_READ(sc, WMREG_FACTPS);
   14890 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14891 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14892 			device_xname(sc->sc_dev), factps, data));
   14893 		if (((factps & FACTPS_MNGCG) == 0)
   14894 		    && ((data & NVM_CFG2_MNGM_MASK)
   14895 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14896 			return 1;
   14897 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14898 	    && ((manc & MANC_ASF_EN) == 0))
   14899 		return 1;
   14900 
   14901 	return 0;
   14902 }
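
/*
 * Summary of the checks above: pass-through is reported only when ASF
 * firmware is present and MANC_RECV_TCO_EN is set, and then one of
 * three chip-dependent tests must also pass:
 *   - ARC subsystem valid: FACTPS_MNGCG clear and the FWSM mode field
 *     indicates iAMT;
 *   - 82574/82583: FACTPS_MNGCG clear and the NVM CFG2 management-mode
 *     field selects pass-through;
 *   - otherwise: MANC has SMBus enabled and ASF disabled.
 */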
   14903 
   14904 static bool
   14905 wm_phy_resetisblocked(struct wm_softc *sc)
   14906 {
   14907 	bool blocked = false;
   14908 	uint32_t reg;
   14909 	int i = 0;
   14910 
   14911 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14912 		device_xname(sc->sc_dev), __func__));
   14913 
   14914 	switch (sc->sc_type) {
   14915 	case WM_T_ICH8:
   14916 	case WM_T_ICH9:
   14917 	case WM_T_ICH10:
   14918 	case WM_T_PCH:
   14919 	case WM_T_PCH2:
   14920 	case WM_T_PCH_LPT:
   14921 	case WM_T_PCH_SPT:
   14922 	case WM_T_PCH_CNP:
   14923 		do {
   14924 			reg = CSR_READ(sc, WMREG_FWSM);
   14925 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14926 				blocked = true;
   14927 				delay(10*1000);
   14928 				continue;
   14929 			}
   14930 			blocked = false;
   14931 		} while (blocked && (i++ < 30));
   14932 		return blocked;
   14934 	case WM_T_82571:
   14935 	case WM_T_82572:
   14936 	case WM_T_82573:
   14937 	case WM_T_82574:
   14938 	case WM_T_82583:
   14939 	case WM_T_80003:
   14940 		reg = CSR_READ(sc, WMREG_MANC);
   14941 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14942 			return true;
   14943 		else
   14944 			return false;
   14946 	default:
   14947 		/* No problem */
   14948 		break;
   14949 	}
   14950 
   14951 	return false;
   14952 }
   14953 
   14954 static void
   14955 wm_get_hw_control(struct wm_softc *sc)
   14956 {
   14957 	uint32_t reg;
   14958 
   14959 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14960 		device_xname(sc->sc_dev), __func__));
   14961 
   14962 	if (sc->sc_type == WM_T_82573) {
   14963 		reg = CSR_READ(sc, WMREG_SWSM);
   14964 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14965 	} else if (sc->sc_type >= WM_T_82571) {
   14966 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14967 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14968 	}
   14969 }
   14970 
   14971 static void
   14972 wm_release_hw_control(struct wm_softc *sc)
   14973 {
   14974 	uint32_t reg;
   14975 
   14976 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14977 		device_xname(sc->sc_dev), __func__));
   14978 
   14979 	if (sc->sc_type == WM_T_82573) {
   14980 		reg = CSR_READ(sc, WMREG_SWSM);
   14981 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14982 	} else if (sc->sc_type >= WM_T_82571) {
   14983 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14984 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14985 	}
   14986 }
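
/*
 * Pairing sketch (illustrative only): the DRV_LOAD bit tells firmware
 * whether a driver owns the hardware, so the two helpers above bracket
 * the driver's lifetime:
 *
 *	wm_get_hw_control(sc);		// attach/init: firmware backs off
 *	// ... device operates under driver control ...
 *	wm_release_hw_control(sc);	// detach/suspend: firmware may
 *					// manage the device again
 */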
   14987 
   14988 static void
   14989 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14990 {
   14991 	uint32_t reg;
   14992 
   14993 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14994 		device_xname(sc->sc_dev), __func__));
   14995 
   14996 	if (sc->sc_type < WM_T_PCH2)
   14997 		return;
   14998 
   14999 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15000 
   15001 	if (gate)
   15002 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15003 	else
   15004 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15005 
   15006 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15007 }
   15008 
   15009 static int
   15010 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15011 {
   15012 	uint32_t fwsm, reg;
   15013 	int rv = 0;
   15014 
   15015 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15016 		device_xname(sc->sc_dev), __func__));
   15017 
   15018 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15019 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15020 
   15021 	/* Disable ULP */
   15022 	wm_ulp_disable(sc);
   15023 
   15024 	/* Acquire PHY semaphore */
   15025 	rv = sc->phy.acquire(sc);
   15026 	if (rv != 0) {
   15027 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15028 			device_xname(sc->sc_dev), __func__));
   15029 		return -1;
   15030 	}
   15031 
   15032 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15033 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15034 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15035 	 */
   15036 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15037 	switch (sc->sc_type) {
   15038 	case WM_T_PCH_LPT:
   15039 	case WM_T_PCH_SPT:
   15040 	case WM_T_PCH_CNP:
   15041 		if (wm_phy_is_accessible_pchlan(sc))
   15042 			break;
   15043 
   15044 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15045 		 * forcing MAC to SMBus mode first.
   15046 		 */
   15047 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15048 		reg |= CTRL_EXT_FORCE_SMBUS;
   15049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15050 #if 0
   15051 		/* XXX Isn't this required??? */
   15052 		CSR_WRITE_FLUSH(sc);
   15053 #endif
   15054 		/* Wait 50 milliseconds for MAC to finish any retries
   15055 		 * that it might be trying to perform from previous
   15056 		 * attempts to acknowledge any phy read requests.
   15057 		 */
   15058 		delay(50 * 1000);
   15059 		/* FALLTHROUGH */
   15060 	case WM_T_PCH2:
   15061 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15062 			break;
   15063 		/* FALLTHROUGH */
   15064 	case WM_T_PCH:
   15065 		if (sc->sc_type == WM_T_PCH)
   15066 			if ((fwsm & FWSM_FW_VALID) != 0)
   15067 				break;
   15068 
   15069 		if (wm_phy_resetisblocked(sc) == true) {
   15070 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15071 			break;
   15072 		}
   15073 
   15074 		/* Toggle LANPHYPC Value bit */
   15075 		wm_toggle_lanphypc_pch_lpt(sc);
   15076 
   15077 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15078 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15079 				break;
   15080 
   15081 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15082 			 * so ensure that the MAC is also out of SMBus mode
   15083 			 */
   15084 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15085 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15086 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15087 
   15088 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15089 				break;
   15090 			rv = -1;
   15091 		}
   15092 		break;
   15093 	default:
   15094 		break;
   15095 	}
   15096 
   15097 	/* Release semaphore */
   15098 	sc->phy.release(sc);
   15099 
   15100 	if (rv == 0) {
   15101 		/* Check to see if able to reset PHY.  Print error if not */
   15102 		if (wm_phy_resetisblocked(sc)) {
   15103 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15104 			goto out;
   15105 		}
   15106 
    15107 		/* Reset the PHY before any access to it.  Doing so ensures
   15108 		 * that the PHY is in a known good state before we read/write
   15109 		 * PHY registers.  The generic reset is sufficient here,
   15110 		 * because we haven't determined the PHY type yet.
   15111 		 */
   15112 		if (wm_reset_phy(sc) != 0)
   15113 			goto out;
   15114 
   15115 		/* On a successful reset, possibly need to wait for the PHY
   15116 		 * to quiesce to an accessible state before returning control
    15117 		 * to the calling function.  If the PHY does not quiesce, just
    15118 		 * report it; upstream e1000e returns E1000E_BLK_PHY_RESET
    15119 		 * here, as this is the condition the PHY is in.
   15120 		 */
   15121 		if (wm_phy_resetisblocked(sc))
   15122 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15123 	}
   15124 
   15125 out:
   15126 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15127 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15128 		delay(10*1000);
   15129 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15130 	}
   15131 
    15132 	return rv;
   15133 }
   15134 
   15135 static void
   15136 wm_init_manageability(struct wm_softc *sc)
   15137 {
   15138 
   15139 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15140 		device_xname(sc->sc_dev), __func__));
   15141 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15142 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15143 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15144 
   15145 		/* Disable hardware interception of ARP */
   15146 		manc &= ~MANC_ARP_EN;
   15147 
   15148 		/* Enable receiving management packets to the host */
   15149 		if (sc->sc_type >= WM_T_82571) {
   15150 			manc |= MANC_EN_MNG2HOST;
   15151 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15152 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15153 		}
   15154 
   15155 		CSR_WRITE(sc, WMREG_MANC, manc);
   15156 	}
   15157 }
   15158 
   15159 static void
   15160 wm_release_manageability(struct wm_softc *sc)
   15161 {
   15162 
   15163 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15164 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15165 
   15166 		manc |= MANC_ARP_EN;
   15167 		if (sc->sc_type >= WM_T_82571)
   15168 			manc &= ~MANC_EN_MNG2HOST;
   15169 
   15170 		CSR_WRITE(sc, WMREG_MANC, manc);
   15171 	}
   15172 }
   15173 
   15174 static void
   15175 wm_get_wakeup(struct wm_softc *sc)
   15176 {
   15177 
   15178 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15179 	switch (sc->sc_type) {
   15180 	case WM_T_82573:
   15181 	case WM_T_82583:
   15182 		sc->sc_flags |= WM_F_HAS_AMT;
   15183 		/* FALLTHROUGH */
   15184 	case WM_T_80003:
   15185 	case WM_T_82575:
   15186 	case WM_T_82576:
   15187 	case WM_T_82580:
   15188 	case WM_T_I350:
   15189 	case WM_T_I354:
   15190 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15191 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15192 		/* FALLTHROUGH */
   15193 	case WM_T_82541:
   15194 	case WM_T_82541_2:
   15195 	case WM_T_82547:
   15196 	case WM_T_82547_2:
   15197 	case WM_T_82571:
   15198 	case WM_T_82572:
   15199 	case WM_T_82574:
   15200 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15201 		break;
   15202 	case WM_T_ICH8:
   15203 	case WM_T_ICH9:
   15204 	case WM_T_ICH10:
   15205 	case WM_T_PCH:
   15206 	case WM_T_PCH2:
   15207 	case WM_T_PCH_LPT:
   15208 	case WM_T_PCH_SPT:
   15209 	case WM_T_PCH_CNP:
   15210 		sc->sc_flags |= WM_F_HAS_AMT;
   15211 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15212 		break;
   15213 	default:
   15214 		break;
   15215 	}
   15216 
   15217 	/* 1: HAS_MANAGE */
   15218 	if (wm_enable_mng_pass_thru(sc) != 0)
   15219 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15220 
   15221 	/*
    15222 	 * Note that the WOL flag is set after the EEPROM stuff has been
    15223 	 * reset.
   15224 	 */
   15225 }
   15226 
   15227 /*
   15228  * Unconfigure Ultra Low Power mode.
   15229  * Only for I217 and newer (see below).
   15230  */
   15231 static int
   15232 wm_ulp_disable(struct wm_softc *sc)
   15233 {
   15234 	uint32_t reg;
   15235 	uint16_t phyreg;
   15236 	int i = 0, rv = 0;
   15237 
   15238 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15239 		device_xname(sc->sc_dev), __func__));
   15240 	/* Exclude old devices */
   15241 	if ((sc->sc_type < WM_T_PCH_LPT)
   15242 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15243 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15244 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15245 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15246 		return 0;
   15247 
   15248 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15249 		/* Request ME un-configure ULP mode in the PHY */
   15250 		reg = CSR_READ(sc, WMREG_H2ME);
   15251 		reg &= ~H2ME_ULP;
   15252 		reg |= H2ME_ENFORCE_SETTINGS;
   15253 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15254 
   15255 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15256 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15257 			if (i++ == 30) {
   15258 				device_printf(sc->sc_dev, "%s timed out\n",
   15259 				    __func__);
   15260 				return -1;
   15261 			}
   15262 			delay(10 * 1000);
   15263 		}
   15264 		reg = CSR_READ(sc, WMREG_H2ME);
   15265 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15266 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15267 
   15268 		return 0;
   15269 	}
   15270 
   15271 	/* Acquire semaphore */
   15272 	rv = sc->phy.acquire(sc);
   15273 	if (rv != 0) {
   15274 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15275 			device_xname(sc->sc_dev), __func__));
   15276 		return -1;
   15277 	}
   15278 
   15279 	/* Toggle LANPHYPC */
   15280 	wm_toggle_lanphypc_pch_lpt(sc);
   15281 
   15282 	/* Unforce SMBus mode in PHY */
   15283 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15284 	if (rv != 0) {
   15285 		uint32_t reg2;
   15286 
   15287 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15288 			__func__);
   15289 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15290 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15291 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15292 		delay(50 * 1000);
   15293 
   15294 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15295 		    &phyreg);
   15296 		if (rv != 0)
   15297 			goto release;
   15298 	}
   15299 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15300 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15301 
   15302 	/* Unforce SMBus mode in MAC */
   15303 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15304 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15305 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15306 
   15307 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15308 	if (rv != 0)
   15309 		goto release;
   15310 	phyreg |= HV_PM_CTRL_K1_ENA;
   15311 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15312 
   15313 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15314 		&phyreg);
   15315 	if (rv != 0)
   15316 		goto release;
   15317 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15318 	    | I218_ULP_CONFIG1_STICKY_ULP
   15319 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15320 	    | I218_ULP_CONFIG1_WOL_HOST
   15321 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15322 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15323 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15324 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15325 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15326 	phyreg |= I218_ULP_CONFIG1_START;
   15327 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15328 
   15329 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15330 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15331 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15332 
   15333 release:
   15334 	/* Release semaphore */
   15335 	sc->phy.release(sc);
   15336 	wm_gmii_reset(sc);
   15337 	delay(50 * 1000);
   15338 
   15339 	return rv;
   15340 }
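
/*
 * Note on the two exit paths above: when valid management firmware is
 * present (FWSM_FW_VALID), ULP teardown is delegated to the ME through
 * the H2ME register and the function returns early.  Otherwise the
 * driver does it by hand: toggle LANPHYPC, un-force SMBus mode in both
 * the PHY and the MAC, re-enable K1, clear the I218 ULP configuration
 * bits, and finally reset the PHY.
 */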
   15341 
   15342 /* WOL in the newer chipset interfaces (pchlan) */
   15343 static int
   15344 wm_enable_phy_wakeup(struct wm_softc *sc)
   15345 {
   15346 	device_t dev = sc->sc_dev;
   15347 	uint32_t mreg, moff;
   15348 	uint16_t wuce, wuc, wufc, preg;
   15349 	int i, rv;
   15350 
   15351 	KASSERT(sc->sc_type >= WM_T_PCH);
   15352 
   15353 	/* Copy MAC RARs to PHY RARs */
   15354 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15355 
   15356 	/* Activate PHY wakeup */
   15357 	rv = sc->phy.acquire(sc);
   15358 	if (rv != 0) {
   15359 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15360 		    __func__);
   15361 		return rv;
   15362 	}
   15363 
   15364 	/*
   15365 	 * Enable access to PHY wakeup registers.
   15366 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15367 	 */
   15368 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15369 	if (rv != 0) {
   15370 		device_printf(dev,
   15371 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15372 		goto release;
   15373 	}
   15374 
   15375 	/* Copy MAC MTA to PHY MTA */
   15376 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15377 		uint16_t lo, hi;
   15378 
   15379 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15380 		lo = (uint16_t)(mreg & 0xffff);
   15381 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15382 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15383 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15384 	}
   15385 
   15386 	/* Configure PHY Rx Control register */
   15387 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15388 	mreg = CSR_READ(sc, WMREG_RCTL);
   15389 	if (mreg & RCTL_UPE)
   15390 		preg |= BM_RCTL_UPE;
   15391 	if (mreg & RCTL_MPE)
   15392 		preg |= BM_RCTL_MPE;
   15393 	preg &= ~(BM_RCTL_MO_MASK);
   15394 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15395 	if (moff != 0)
   15396 		preg |= moff << BM_RCTL_MO_SHIFT;
   15397 	if (mreg & RCTL_BAM)
   15398 		preg |= BM_RCTL_BAM;
   15399 	if (mreg & RCTL_PMCF)
   15400 		preg |= BM_RCTL_PMCF;
   15401 	mreg = CSR_READ(sc, WMREG_CTRL);
   15402 	if (mreg & CTRL_RFCE)
   15403 		preg |= BM_RCTL_RFCE;
   15404 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15405 
   15406 	wuc = WUC_APME | WUC_PME_EN;
   15407 	wufc = WUFC_MAG;
   15408 	/* Enable PHY wakeup in MAC register */
   15409 	CSR_WRITE(sc, WMREG_WUC,
   15410 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15411 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15412 
   15413 	/* Configure and enable PHY wakeup in PHY registers */
   15414 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15415 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15416 
   15417 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15418 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15419 
   15420 release:
   15421 	sc->phy.release(sc);
   15422 
    15423 	return rv;
   15424 }
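
/*
 * Access-pattern sketch (illustrative only): every BM_WUC_PAGE wakeup
 * register access must be bracketed the way the function above does it;
 * judging from the calls above, the 4th argument of
 * wm_access_phy_wakeup_reg_bm() selects read (1) or write (0):
 *
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);	 // save + enable
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &val, 0, true);
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); // restore
 */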
   15425 
   15426 /* Power down workaround on D3 */
   15427 static void
   15428 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15429 {
   15430 	uint32_t reg;
   15431 	uint16_t phyreg;
   15432 	int i;
   15433 
   15434 	for (i = 0; i < 2; i++) {
   15435 		/* Disable link */
   15436 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15437 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15438 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15439 
   15440 		/*
   15441 		 * Call gig speed drop workaround on Gig disable before
   15442 		 * accessing any PHY registers
   15443 		 */
   15444 		if (sc->sc_type == WM_T_ICH8)
   15445 			wm_gig_downshift_workaround_ich8lan(sc);
   15446 
   15447 		/* Write VR power-down enable */
   15448 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15449 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15450 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15451 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15452 
   15453 		/* Read it back and test */
   15454 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15455 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15456 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15457 			break;
   15458 
   15459 		/* Issue PHY reset and repeat at most one more time */
   15460 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15461 	}
   15462 }
   15463 
   15464 /*
   15465  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15466  *  @sc: pointer to the HW structure
   15467  *
   15468  *  During S0 to Sx transition, it is possible the link remains at gig
   15469  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15470  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15471  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15472  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15473  *  needs to be written.
   15474  *  Parts that support (and are linked to a partner which support) EEE in
   15475  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15476  *  than 10Mbps w/o EEE.
   15477  */
   15478 static void
   15479 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15480 {
   15481 	device_t dev = sc->sc_dev;
   15482 	struct ethercom *ec = &sc->sc_ethercom;
   15483 	uint32_t phy_ctrl;
   15484 	int rv;
   15485 
   15486 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15487 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15488 
   15489 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15490 
   15491 	if (sc->sc_phytype == WMPHY_I217) {
   15492 		uint16_t devid = sc->sc_pcidevid;
   15493 
   15494 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15495 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15496 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15497 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15498 		    (sc->sc_type >= WM_T_PCH_SPT))
   15499 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15500 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15501 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15502 
   15503 		if (sc->phy.acquire(sc) != 0)
   15504 			goto out;
   15505 
   15506 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15507 			uint16_t eee_advert;
   15508 
   15509 			rv = wm_read_emi_reg_locked(dev,
   15510 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15511 			if (rv)
   15512 				goto release;
   15513 
   15514 			/*
   15515 			 * Disable LPLU if both link partners support 100BaseT
   15516 			 * EEE and 100Full is advertised on both ends of the
   15517 			 * link, and enable Auto Enable LPI since there will
   15518 			 * be no driver to enable LPI while in Sx.
   15519 			 */
   15520 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15521 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15522 				uint16_t anar, phy_reg;
   15523 
   15524 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15525 				    &anar);
   15526 				if (anar & ANAR_TX_FD) {
   15527 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15528 					    PHY_CTRL_NOND0A_LPLU);
   15529 
   15530 					/* Set Auto Enable LPI after link up */
   15531 					sc->phy.readreg_locked(dev, 2,
   15532 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15533 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15534 					sc->phy.writereg_locked(dev, 2,
   15535 					    I217_LPI_GPIO_CTRL, phy_reg);
   15536 				}
   15537 			}
   15538 		}
   15539 
   15540 		/*
   15541 		 * For i217 Intel Rapid Start Technology support,
   15542 		 * when the system is going into Sx and no manageability engine
   15543 		 * is present, the driver must configure proxy to reset only on
   15544 		 * power good.	LPI (Low Power Idle) state must also reset only
   15545 		 * on power good, as well as the MTA (Multicast table array).
   15546 		 * The SMBus release must also be disabled on LCD reset.
   15547 		 */
   15548 
   15549 		/*
   15550 		 * Enable MTA to reset for Intel Rapid Start Technology
   15551 		 * Support
   15552 		 */
   15553 
   15554 release:
   15555 		sc->phy.release(sc);
   15556 	}
   15557 out:
   15558 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15559 
   15560 	if (sc->sc_type == WM_T_ICH8)
   15561 		wm_gig_downshift_workaround_ich8lan(sc);
   15562 
   15563 	if (sc->sc_type >= WM_T_PCH) {
   15564 		wm_oem_bits_config_ich8lan(sc, false);
   15565 
   15566 		/* Reset PHY to activate OEM bits on 82577/8 */
   15567 		if (sc->sc_type == WM_T_PCH)
   15568 			wm_reset_phy(sc);
   15569 
   15570 		if (sc->phy.acquire(sc) != 0)
   15571 			return;
   15572 		wm_write_smbus_addr(sc);
   15573 		sc->phy.release(sc);
   15574 	}
   15575 }
   15576 
   15577 /*
   15578  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15579  *  @sc: pointer to the HW structure
   15580  *
   15581  *  During Sx to S0 transitions on non-managed devices or managed devices
   15582  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15583  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15584  *  the PHY.
   15585  *  On i217, setup Intel Rapid Start Technology.
   15586  */
   15587 static int
   15588 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15589 {
   15590 	device_t dev = sc->sc_dev;
   15591 	int rv;
   15592 
   15593 	if (sc->sc_type < WM_T_PCH2)
   15594 		return 0;
   15595 
   15596 	rv = wm_init_phy_workarounds_pchlan(sc);
   15597 	if (rv != 0)
   15598 		return -1;
   15599 
    15600 	/* For i217 Intel Rapid Start Technology support, when the system
    15601 	 * is transitioning from Sx and no manageability engine is present,
   15602 	 * configure SMBus to restore on reset, disable proxy, and enable
   15603 	 * the reset on MTA (Multicast table array).
   15604 	 */
   15605 	if (sc->sc_phytype == WMPHY_I217) {
   15606 		uint16_t phy_reg;
   15607 
   15608 		if (sc->phy.acquire(sc) != 0)
   15609 			return -1;
   15610 
   15611 		/* Clear Auto Enable LPI after link up */
   15612 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15613 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15614 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15615 
   15616 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15617 			/* Restore clear on SMB if no manageability engine
   15618 			 * is present
   15619 			 */
   15620 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15621 			    &phy_reg);
   15622 			if (rv != 0)
   15623 				goto release;
   15624 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15625 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15626 
   15627 			/* Disable Proxy */
   15628 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15629 		}
   15630 		/* Enable reset on MTA */
    15631 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15632 		if (rv != 0)
   15633 			goto release;
   15634 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15635 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15636 
   15637 release:
   15638 		sc->phy.release(sc);
   15639 		return rv;
   15640 	}
   15641 
   15642 	return 0;
   15643 }
   15644 
   15645 static void
   15646 wm_enable_wakeup(struct wm_softc *sc)
   15647 {
   15648 	uint32_t reg, pmreg;
   15649 	pcireg_t pmode;
   15650 	int rv = 0;
   15651 
   15652 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15653 		device_xname(sc->sc_dev), __func__));
   15654 
   15655 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15656 	    &pmreg, NULL) == 0)
   15657 		return;
   15658 
   15659 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15660 		goto pme;
   15661 
   15662 	/* Advertise the wakeup capability */
   15663 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15664 	    | CTRL_SWDPIN(3));
   15665 
   15666 	/* Keep the laser running on fiber adapters */
   15667 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15668 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15669 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15670 		reg |= CTRL_EXT_SWDPIN(3);
   15671 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15672 	}
   15673 
   15674 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15675 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15676 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15677 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15678 		wm_suspend_workarounds_ich8lan(sc);
   15679 
   15680 #if 0	/* For the multicast packet */
   15681 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15682 	reg |= WUFC_MC;
   15683 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15684 #endif
   15685 
   15686 	if (sc->sc_type >= WM_T_PCH) {
   15687 		rv = wm_enable_phy_wakeup(sc);
   15688 		if (rv != 0)
   15689 			goto pme;
   15690 	} else {
   15691 		/* Enable wakeup by the MAC */
   15692 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15693 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15694 	}
   15695 
   15696 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15697 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15698 		|| (sc->sc_type == WM_T_PCH2))
   15699 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15700 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15701 
   15702 pme:
   15703 	/* Request PME */
   15704 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15705 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15706 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15707 		/* For WOL */
   15708 		pmode |= PCI_PMCSR_PME_EN;
   15709 	} else {
   15710 		/* Disable WOL */
   15711 		pmode &= ~PCI_PMCSR_PME_EN;
   15712 	}
   15713 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15714 }
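
/*
 * PMCSR note: PCI_PMCSR_PME_STS is write-one-to-clear, which is why the
 * code above always ORs it into the value it writes back: that clears a
 * stale "PME asserted" status rather than setting it.  A minimal sketch
 * of the read-modify-write:
 *
 *	pmode = pci_conf_read(pc, tag, pmreg + PCI_PMCSR);
 *	pmode |= PCI_PMCSR_PME_STS;	// W1C: clear stale status
 *	pmode |= PCI_PMCSR_PME_EN;	// arm PME for WOL
 *	pci_conf_write(pc, tag, pmreg + PCI_PMCSR, pmode);
 */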
   15715 
   15716 /* Disable ASPM L0s and/or L1 for workaround */
   15717 static void
   15718 wm_disable_aspm(struct wm_softc *sc)
   15719 {
   15720 	pcireg_t reg, mask = 0;
    15721 	const char *str = "";
   15722 
   15723 	/*
    15724 	 * Only for PCIe devices which have the PCIe capability in PCI
    15725 	 * config space.
   15726 	 */
   15727 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15728 		return;
   15729 
   15730 	switch (sc->sc_type) {
   15731 	case WM_T_82571:
   15732 	case WM_T_82572:
   15733 		/*
   15734 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15735 		 * State Power management L1 State (ASPM L1).
   15736 		 */
   15737 		mask = PCIE_LCSR_ASPM_L1;
   15738 		str = "L1 is";
   15739 		break;
   15740 	case WM_T_82573:
   15741 	case WM_T_82574:
   15742 	case WM_T_82583:
   15743 		/*
   15744 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15745 		 *
    15746 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15747 		 * some chipsets.  The 82574 and 82583 documents say that
    15748 		 * disabling L0s on those specific chipsets is sufficient,
    15749 		 * but we follow what the Intel em driver does.
   15750 		 *
   15751 		 * References:
   15752 		 * Errata 8 of the Specification Update of i82573.
   15753 		 * Errata 20 of the Specification Update of i82574.
   15754 		 * Errata 9 of the Specification Update of i82583.
   15755 		 */
   15756 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15757 		str = "L0s and L1 are";
   15758 		break;
   15759 	default:
   15760 		return;
   15761 	}
   15762 
   15763 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15764 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15765 	reg &= ~mask;
   15766 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15767 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15768 
   15769 	/* Print only in wm_attach() */
   15770 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15771 		aprint_verbose_dev(sc->sc_dev,
   15772 		    "ASPM %s disabled to workaround the errata.\n", str);
   15773 }
   15774 
   15775 /* LPLU */
   15776 
   15777 static void
   15778 wm_lplu_d0_disable(struct wm_softc *sc)
   15779 {
   15780 	struct mii_data *mii = &sc->sc_mii;
   15781 	uint32_t reg;
   15782 	uint16_t phyval;
   15783 
   15784 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15785 		device_xname(sc->sc_dev), __func__));
   15786 
   15787 	if (sc->sc_phytype == WMPHY_IFE)
   15788 		return;
   15789 
   15790 	switch (sc->sc_type) {
   15791 	case WM_T_82571:
   15792 	case WM_T_82572:
   15793 	case WM_T_82573:
   15794 	case WM_T_82575:
   15795 	case WM_T_82576:
   15796 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15797 		phyval &= ~PMR_D0_LPLU;
   15798 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15799 		break;
   15800 	case WM_T_82580:
   15801 	case WM_T_I350:
   15802 	case WM_T_I210:
   15803 	case WM_T_I211:
   15804 		reg = CSR_READ(sc, WMREG_PHPM);
   15805 		reg &= ~PHPM_D0A_LPLU;
   15806 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15807 		break;
   15808 	case WM_T_82574:
   15809 	case WM_T_82583:
   15810 	case WM_T_ICH8:
   15811 	case WM_T_ICH9:
   15812 	case WM_T_ICH10:
   15813 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15814 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15815 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15816 		CSR_WRITE_FLUSH(sc);
   15817 		break;
   15818 	case WM_T_PCH:
   15819 	case WM_T_PCH2:
   15820 	case WM_T_PCH_LPT:
   15821 	case WM_T_PCH_SPT:
   15822 	case WM_T_PCH_CNP:
   15823 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15824 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15825 		if (wm_phy_resetisblocked(sc) == false)
   15826 			phyval |= HV_OEM_BITS_ANEGNOW;
   15827 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15828 		break;
   15829 	default:
   15830 		break;
   15831 	}
   15832 }
   15833 
   15834 /* EEE */
   15835 
   15836 static int
   15837 wm_set_eee_i350(struct wm_softc *sc)
   15838 {
   15839 	struct ethercom *ec = &sc->sc_ethercom;
   15840 	uint32_t ipcnfg, eeer;
   15841 	uint32_t ipcnfg_mask
   15842 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15843 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15844 
   15845 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15846 
   15847 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15848 	eeer = CSR_READ(sc, WMREG_EEER);
   15849 
   15850 	/* Enable or disable per user setting */
   15851 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15852 		ipcnfg |= ipcnfg_mask;
   15853 		eeer |= eeer_mask;
   15854 	} else {
   15855 		ipcnfg &= ~ipcnfg_mask;
   15856 		eeer &= ~eeer_mask;
   15857 	}
   15858 
   15859 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15860 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15861 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15862 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15863 
   15864 	return 0;
   15865 }
   15866 
   15867 static int
   15868 wm_set_eee_pchlan(struct wm_softc *sc)
   15869 {
   15870 	device_t dev = sc->sc_dev;
   15871 	struct ethercom *ec = &sc->sc_ethercom;
   15872 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15873 	int rv = 0;
   15874 
   15875 	switch (sc->sc_phytype) {
   15876 	case WMPHY_82579:
   15877 		lpa = I82579_EEE_LP_ABILITY;
   15878 		pcs_status = I82579_EEE_PCS_STATUS;
   15879 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15880 		break;
   15881 	case WMPHY_I217:
   15882 		lpa = I217_EEE_LP_ABILITY;
   15883 		pcs_status = I217_EEE_PCS_STATUS;
   15884 		adv_addr = I217_EEE_ADVERTISEMENT;
   15885 		break;
   15886 	default:
   15887 		return 0;
   15888 	}
   15889 
   15890 	if (sc->phy.acquire(sc)) {
   15891 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15892 		return 0;
   15893 	}
   15894 
   15895 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15896 	if (rv != 0)
   15897 		goto release;
   15898 
   15899 	/* Clear bits that enable EEE in various speeds */
   15900 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15901 
   15902 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15903 		/* Save off link partner's EEE ability */
   15904 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15905 		if (rv != 0)
   15906 			goto release;
   15907 
   15908 		/* Read EEE advertisement */
   15909 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15910 			goto release;
   15911 
   15912 		/*
   15913 		 * Enable EEE only for speeds in which the link partner is
   15914 		 * EEE capable and for which we advertise EEE.
   15915 		 */
   15916 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15917 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15918 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15919 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15920 			if ((data & ANLPAR_TX_FD) != 0)
   15921 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15922 			else {
   15923 				/*
   15924 				 * EEE is not supported in 100Half, so ignore
   15925 				 * partner's EEE in 100 ability if full-duplex
   15926 				 * is not advertised.
   15927 				 */
   15928 				sc->eee_lp_ability
   15929 				    &= ~AN_EEEADVERT_100_TX;
   15930 			}
   15931 		}
   15932 	}
   15933 
   15934 	if (sc->sc_phytype == WMPHY_82579) {
   15935 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15936 		if (rv != 0)
   15937 			goto release;
   15938 
   15939 		data &= ~I82579_LPI_PLL_SHUT_100;
   15940 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15941 	}
   15942 
   15943 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15944 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15945 		goto release;
   15946 
   15947 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15948 release:
   15949 	sc->phy.release(sc);
   15950 
   15951 	return rv;
   15952 }
   15953 
   15954 static int
   15955 wm_set_eee(struct wm_softc *sc)
   15956 {
   15957 	struct ethercom *ec = &sc->sc_ethercom;
   15958 
   15959 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15960 		return 0;
   15961 
   15962 	if (sc->sc_type == WM_T_I354) {
   15963 		/* I354 uses an external PHY */
   15964 		return 0; /* not yet */
   15965 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15966 		return wm_set_eee_i350(sc);
   15967 	else if (sc->sc_type >= WM_T_PCH2)
   15968 		return wm_set_eee_pchlan(sc);
   15969 
   15970 	return 0;
   15971 }
   15972 
   15973 /*
   15974  * Workarounds (mainly PHY related).
   15975  * Basically, PHY's workarounds are in the PHY drivers.
   15976  */
   15977 
   15978 /* Work-around for 82566 Kumeran PCS lock loss */
   15979 static int
   15980 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15981 {
   15982 	struct mii_data *mii = &sc->sc_mii;
   15983 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15984 	int i, reg, rv;
   15985 	uint16_t phyreg;
   15986 
   15987 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15988 		device_xname(sc->sc_dev), __func__));
   15989 
   15990 	/* If the link is not up, do nothing */
   15991 	if ((status & STATUS_LU) == 0)
   15992 		return 0;
   15993 
   15994 	/* Nothing to do if the link is other than 1Gbps */
   15995 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15996 		return 0;
   15997 
   15998 	for (i = 0; i < 10; i++) {
   15999 		/* read twice */
   16000 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16001 		if (rv != 0)
   16002 			return rv;
   16003 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16004 		if (rv != 0)
   16005 			return rv;
   16006 
   16007 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16008 			goto out;	/* GOOD! */
   16009 
   16010 		/* Reset the PHY */
   16011 		wm_reset_phy(sc);
   16012 		delay(5*1000);
   16013 	}
   16014 
   16015 	/* Disable GigE link negotiation */
   16016 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16017 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16018 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16019 
   16020 	/*
   16021 	 * Call gig speed drop workaround on Gig disable before accessing
   16022 	 * any PHY registers.
   16023 	 */
   16024 	wm_gig_downshift_workaround_ich8lan(sc);
   16025 
   16026 out:
   16027 	return 0;
   16028 }
   16029 
   16030 /*
   16031  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16032  *  @sc: pointer to the HW structure
   16033  *
   16034  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   16035  *  LPLU, Gig disable, MDIC PHY reset):
   16036  *    1) Set Kumeran Near-end loopback
   16037  *    2) Clear Kumeran Near-end loopback
   16038  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16039  */
   16040 static void
   16041 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16042 {
   16043 	uint16_t kmreg;
   16044 
   16045 	/* Only for igp3 */
   16046 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16047 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16048 			return;
   16049 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16050 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16051 			return;
   16052 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16053 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16054 	}
   16055 }
   16056 
   16057 /*
   16058  * Workaround for pch's PHYs
   16059  * XXX should be moved to new PHY driver?
   16060  */
   16061 static int
   16062 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16063 {
   16064 	device_t dev = sc->sc_dev;
   16065 	struct mii_data *mii = &sc->sc_mii;
   16066 	struct mii_softc *child;
   16067 	uint16_t phy_data, phyrev = 0;
   16068 	int phytype = sc->sc_phytype;
   16069 	int rv;
   16070 
   16071 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16072 		device_xname(dev), __func__));
   16073 	KASSERT(sc->sc_type == WM_T_PCH);
   16074 
   16075 	/* Set MDIO slow mode before any other MDIO access */
   16076 	if (phytype == WMPHY_82577)
   16077 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16078 			return rv;
   16079 
   16080 	child = LIST_FIRST(&mii->mii_phys);
   16081 	if (child != NULL)
   16082 		phyrev = child->mii_mpd_rev;
   16083 
    16084 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16085 	if ((child != NULL) &&
   16086 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16087 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16088 		/* Disable generation of early preamble (0x4431) */
   16089 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16090 		    &phy_data);
   16091 		if (rv != 0)
   16092 			return rv;
   16093 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16094 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16095 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16096 		    phy_data);
   16097 		if (rv != 0)
   16098 			return rv;
   16099 
   16100 		/* Preamble tuning for SSC */
   16101 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16102 		if (rv != 0)
   16103 			return rv;
   16104 	}
   16105 
   16106 	/* 82578 */
   16107 	if (phytype == WMPHY_82578) {
   16108 		/*
   16109 		 * Return registers to default by doing a soft reset then
   16110 		 * writing 0x3140 to the control register
   16111 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16112 		 */
   16113 		if ((child != NULL) && (phyrev < 2)) {
   16114 			PHY_RESET(child);
   16115 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16116 			if (rv != 0)
   16117 				return rv;
   16118 		}
   16119 	}
   16120 
   16121 	/* Select page 0 */
   16122 	if ((rv = sc->phy.acquire(sc)) != 0)
   16123 		return rv;
   16124 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16125 	sc->phy.release(sc);
   16126 	if (rv != 0)
   16127 		return rv;
   16128 
   16129 	/*
   16130 	 * Configure the K1 Si workaround during phy reset assuming there is
   16131 	 * link so that it disables K1 if link is in 1Gbps.
   16132 	 */
   16133 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16134 		return rv;
   16135 
   16136 	/* Workaround for link disconnects on a busy hub in half duplex */
   16137 	rv = sc->phy.acquire(sc);
   16138 	if (rv)
   16139 		return rv;
   16140 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16141 	if (rv)
   16142 		goto release;
   16143 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16144 	    phy_data & 0x00ff);
   16145 	if (rv)
   16146 		goto release;
   16147 
   16148 	/* Set MSE higher to enable link to stay up when noise is high */
   16149 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16150 release:
   16151 	sc->phy.release(sc);
   16152 
   16153 	return rv;
   16154 }
   16155 
   16156 /*
   16157  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16158  *  @sc:   pointer to the HW structure
   16159  */
   16160 static void
   16161 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16162 {
   16163 
   16164 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16165 		device_xname(sc->sc_dev), __func__));
   16166 
   16167 	if (sc->phy.acquire(sc) != 0)
   16168 		return;
   16169 
   16170 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16171 
   16172 	sc->phy.release(sc);
   16173 }
   16174 
   16175 static void
   16176 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16177 {
   16178 	device_t dev = sc->sc_dev;
   16179 	uint32_t mac_reg;
   16180 	uint16_t i, wuce;
   16181 	int count;
   16182 
   16183 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16184 		device_xname(dev), __func__));
   16185 
   16186 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16187 		return;
   16188 
   16189 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16190 	count = wm_rar_count(sc);
   16191 	for (i = 0; i < count; i++) {
   16192 		uint16_t lo, hi;
   16193 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16194 		lo = (uint16_t)(mac_reg & 0xffff);
   16195 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16196 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16197 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16198 
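         		/*
         		 * For RAH, copy the low 16 bits to BM_RAR_H and
         		 * propagate only the Address Valid bit to BM_RAR_CTRL.
         		 */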
   16199 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16200 		lo = (uint16_t)(mac_reg & 0xffff);
   16201 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16202 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16203 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16204 	}
   16205 
   16206 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16207 }
   16208 
   16209 /*
   16210  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16211  *  with 82579 PHY
   16212  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16213  */
   16214 static int
   16215 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16216 {
   16217 	device_t dev = sc->sc_dev;
   16218 	int rar_count;
   16219 	int rv;
   16220 	uint32_t mac_reg;
   16221 	uint16_t dft_ctrl, data;
   16222 	uint16_t i;
   16223 
   16224 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16225 		device_xname(dev), __func__));
   16226 
   16227 	if (sc->sc_type < WM_T_PCH2)
   16228 		return 0;
   16229 
   16230 	/* Acquire PHY semaphore */
   16231 	rv = sc->phy.acquire(sc);
   16232 	if (rv != 0)
   16233 		return rv;
   16234 
   16235 	/* Disable Rx path while enabling/disabling workaround */
   16236 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16237 	if (rv != 0)
   16238 		goto out;
   16239 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16240 	    dft_ctrl | (1 << 14));
   16241 	if (rv != 0)
   16242 		goto out;
   16243 
   16244 	if (enable) {
   16245 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16246 		 * SHRAL/H) and initial CRC values to the MAC
   16247 		 */
   16248 		rar_count = wm_rar_count(sc);
   16249 		for (i = 0; i < rar_count; i++) {
   16250 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16251 			uint32_t addr_high, addr_low;
   16252 
   16253 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16254 			if (!(addr_high & RAL_AV))
   16255 				continue;
   16256 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16257 			mac_addr[0] = (addr_low & 0xFF);
   16258 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16259 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16260 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16261 			mac_addr[4] = (addr_high & 0xFF);
   16262 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16263 
   16264 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16265 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16266 		}
   16267 
   16268 		/* Write Rx addresses to the PHY */
   16269 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16270 	}
   16271 
   16272 	/*
   16273 	 * If enable ==
   16274 	 *	true: Enable jumbo frame workaround in the MAC.
   16275 	 *	false: Write MAC register values back to h/w defaults.
   16276 	 */
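         	/*
         	 * The FFLT_DBG bit values below are taken from Intel reference
         	 * drivers; the individual bits are not publicly documented.
         	 */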
   16277 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16278 	if (enable) {
   16279 		mac_reg &= ~(1 << 14);
   16280 		mac_reg |= (7 << 15);
   16281 	} else
   16282 		mac_reg &= ~(0xf << 14);
   16283 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16284 
   16285 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16286 	if (enable) {
   16287 		mac_reg |= RCTL_SECRC;
   16288 		sc->sc_rctl |= RCTL_SECRC;
   16289 		sc->sc_flags |= WM_F_CRC_STRIP;
   16290 	} else {
   16291 		mac_reg &= ~RCTL_SECRC;
   16292 		sc->sc_rctl &= ~RCTL_SECRC;
   16293 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16294 	}
   16295 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16296 
   16297 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16298 	if (rv != 0)
   16299 		goto out;
   16300 	if (enable)
   16301 		data |= 1 << 0;
   16302 	else
   16303 		data &= ~(1 << 0);
   16304 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16305 	if (rv != 0)
   16306 		goto out;
   16307 
   16308 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16309 	if (rv != 0)
   16310 		goto out;
   16311 	/*
   16312 	 * XXX FreeBSD and Linux do the same thing: they write the same value
   16313 	 * in both the enable case and the disable case. Is that correct?
   16314 	 */
   16315 	data &= ~(0xf << 8);
   16316 	data |= (0xb << 8);
   16317 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16318 	if (rv != 0)
   16319 		goto out;
   16320 
   16321 	/*
   16322 	 * If enable ==
   16323 	 *	true: Enable jumbo frame workaround in the PHY.
   16324 	 *	false: Write PHY register values back to h/w defaults.
   16325 	 */
   16326 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16327 	if (rv != 0)
   16328 		goto out;
   16329 	data &= ~(0x7F << 5);
   16330 	if (enable)
   16331 		data |= (0x37 << 5);
   16332 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16333 	if (rv != 0)
   16334 		goto out;
   16335 
   16336 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16337 	if (rv != 0)
   16338 		goto out;
   16339 	if (enable)
   16340 		data &= ~(1 << 13);
   16341 	else
   16342 		data |= (1 << 13);
   16343 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16344 	if (rv != 0)
   16345 		goto out;
   16346 
   16347 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16348 	if (rv != 0)
   16349 		goto out;
   16350 	data &= ~(0x3FF << 2);
   16351 	if (enable)
   16352 		data |= (I82579_TX_PTR_GAP << 2);
   16353 	else
   16354 		data |= (0x8 << 2);
   16355 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16356 	if (rv != 0)
   16357 		goto out;
   16358 
   16359 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16360 	    enable ? 0xf100 : 0x7e00);
   16361 	if (rv != 0)
   16362 		goto out;
   16363 
   16364 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16365 	if (rv != 0)
   16366 		goto out;
   16367 	if (enable)
   16368 		data |= 1 << 10;
   16369 	else
   16370 		data &= ~(1 << 10);
   16371 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16372 	if (rv != 0)
   16373 		goto out;
   16374 
   16375 	/* Re-enable Rx path after enabling/disabling workaround */
   16376 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16377 	    dft_ctrl & ~(1 << 14));
   16378 
   16379 out:
   16380 	sc->phy.release(sc);
   16381 
   16382 	return rv;
   16383 }
   16384 
   16385 /*
   16386  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16387  *  done after every PHY reset.
   16388  */
   16389 static int
   16390 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16391 {
   16392 	device_t dev = sc->sc_dev;
   16393 	int rv;
   16394 
   16395 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16396 		device_xname(dev), __func__));
   16397 	KASSERT(sc->sc_type == WM_T_PCH2);
   16398 
   16399 	/* Set MDIO slow mode before any other MDIO access */
   16400 	rv = wm_set_mdio_slow_mode_hv(sc);
   16401 	if (rv != 0)
   16402 		return rv;
   16403 
   16404 	rv = sc->phy.acquire(sc);
   16405 	if (rv != 0)
   16406 		return rv;
   16407 	/* Set MSE higher so that the link stays up when noise is high */
   16408 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16409 	if (rv != 0)
   16410 		goto release;
   16411 	/* Drop the link after the MSE threshold has been reached 5 times */
   16412 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16413 release:
   16414 	sc->phy.release(sc);
   16415 
   16416 	return rv;
   16417 }
   16418 
   16419 /**
   16420  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16421  *  @link: link up bool flag
   16422  *
   16423  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
   16424  *  indications, preventing further DMA write requests.  Work around the
   16425  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   16426  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16427  *  speeds in order to avoid Tx hangs.
   16428  **/
   16429 static int
   16430 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16431 {
   16432 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16433 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16434 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16435 	uint16_t phyreg;
   16436 
   16437 	if (link && (speed == STATUS_SPEED_1000)) {
   16438 		int rv = sc->phy.acquire(sc);
         		if (rv != 0)
         			return rv;
   16439 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16440 		    &phyreg);
   16441 		if (rv != 0)
   16442 			goto release;
   16443 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16444 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16445 		if (rv != 0)
   16446 			goto release;
   16447 		delay(20);
   16448 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16449 
   16450 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16451 		    &phyreg);
   16452 release:
   16453 		sc->phy.release(sc);
   16454 		return rv;
   16455 	}
   16456 
   16457 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16458 
   16459 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16460 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16461 	    || !link
   16462 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16463 		goto update_fextnvm6;
   16464 
   16465 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16466 
   16467 	/* Clear link status transmit timeout */
   16468 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16469 	if (speed == STATUS_SPEED_100) {
   16470 		/* Set inband Tx timeout to 5x10us for 100Half */
   16471 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16472 
   16473 		/* Do not extend the K1 entry latency for 100Half */
   16474 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16475 	} else {
   16476 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16477 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16478 
   16479 		/* Extend the K1 entry latency for 10 Mbps */
   16480 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16481 	}
   16482 
   16483 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16484 
   16485 update_fextnvm6:
   16486 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16487 	return 0;
   16488 }
   16489 
   16490 /*
   16491  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16492  *  @sc:   pointer to the HW structure
   16493  *  @link: link up bool flag
   16494  *
   16495  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16496  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   16497  *  If link is down, the function will restore the default K1 setting located
   16498  *  in the NVM.
   16499  */
   16500 static int
   16501 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16502 {
   16503 	int k1_enable = sc->sc_nvm_k1_enabled;
   16504 
   16505 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16506 		device_xname(sc->sc_dev), __func__));
   16507 
   16508 	if (sc->phy.acquire(sc) != 0)
   16509 		return -1;
   16510 
   16511 	if (link) {
   16512 		k1_enable = 0;
   16513 
   16514 		/* Link stall fix for link up */
   16515 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16516 		    0x0100);
   16517 	} else {
   16518 		/* Link stall fix for link down */
   16519 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16520 		    0x4100);
   16521 	}
   16522 
   16523 	wm_configure_k1_ich8lan(sc, k1_enable);
   16524 	sc->phy.release(sc);
   16525 
   16526 	return 0;
   16527 }
   16528 
   16529 /*
   16530  *  wm_k1_workaround_lv - K1 Si workaround
   16531  *  @sc:   pointer to the HW structure
   16532  *
   16533  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
   16534  *  Disable K1 for 1000 and 100 speeds.
   16535  */
   16536 static int
   16537 wm_k1_workaround_lv(struct wm_softc *sc)
   16538 {
   16539 	uint32_t reg;
   16540 	uint16_t phyreg;
   16541 	int rv;
   16542 
   16543 	if (sc->sc_type != WM_T_PCH2)
   16544 		return 0;
   16545 
   16546 	/* Set K1 beacon duration based on 10Mbps speed */
   16547 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16548 	if (rv != 0)
   16549 		return rv;
   16550 
   16551 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16552 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16553 		if (phyreg &
   16554 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16555 			/* LV 1G/100 packet drop issue workaround */
   16556 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16557 			    &phyreg);
   16558 			if (rv != 0)
   16559 				return rv;
   16560 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16561 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16562 			    phyreg);
   16563 			if (rv != 0)
   16564 				return rv;
   16565 		} else {
   16566 			/* For 10Mbps */
   16567 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16568 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16569 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16570 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16571 		}
   16572 	}
   16573 
   16574 	return 0;
   16575 }
   16576 
   16577 /*
   16578  *  wm_link_stall_workaround_hv - Si workaround
   16579  *  @sc: pointer to the HW structure
   16580  *
   16581  *  This function works around a Si bug where the link partner can get
   16582  *  a link up indication before the PHY does. If small packets are sent
   16583  *  by the link partner they can be placed in the packet buffer without
   16584  *  being properly accounted for by the PHY and will stall, preventing
   16585  *  further packets from being received.  The workaround is to clear the
   16586  *  packet buffer after the PHY detects link up.
   16587  */
   16588 static int
   16589 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16590 {
   16591 	uint16_t phyreg;
   16592 
   16593 	if (sc->sc_phytype != WMPHY_82578)
   16594 		return 0;
   16595 
   16596 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16597 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16598 	if ((phyreg & BMCR_LOOP) != 0)
   16599 		return 0;
   16600 
   16601 	/* Check if link is up and at 1Gbps */
   16602 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16603 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16604 	    | BM_CS_STATUS_SPEED_MASK;
   16605 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16606 		| BM_CS_STATUS_SPEED_1000))
   16607 		return 0;
   16608 
   16609 	delay(200 * 1000);	/* XXX too big */
   16610 
   16611 	/* Flush the packets in the fifo buffer */
   16612 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16613 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16614 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16615 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16616 
   16617 	return 0;
   16618 }
   16619 
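         /*
          *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
          *  @sc: pointer to the HW structure
          */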
   16620 static int
   16621 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16622 {
   16623 	int rv;
   16624 	uint16_t reg;
   16625 
   16626 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16627 	if (rv != 0)
   16628 		return rv;
   16629 
   16630 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16631 	    reg | HV_KMRN_MDIO_SLOW);
   16632 }
   16633 
   16634 /*
   16635  *  wm_configure_k1_ich8lan - Configure K1 power state
   16636  *  @sc: pointer to the HW structure
   16637  *  @enable: K1 state to configure
   16638  *
   16639  *  Configure the K1 power state based on the provided parameter.
   16640  *  Assumes semaphore already acquired.
   16641  */
   16642 static void
   16643 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16644 {
   16645 	uint32_t ctrl, ctrl_ext, tmp;
   16646 	uint16_t kmreg;
   16647 	int rv;
   16648 
   16649 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16650 
   16651 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16652 	if (rv != 0)
   16653 		return;
   16654 
   16655 	if (k1_enable)
   16656 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16657 	else
   16658 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16659 
   16660 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16661 	if (rv != 0)
   16662 		return;
   16663 
   16664 	delay(20);
   16665 
   16666 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16667 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16668 
   16669 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16670 	tmp |= CTRL_FRCSPD;
   16671 
   16672 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16673 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16674 	CSR_WRITE_FLUSH(sc);
   16675 	delay(20);
   16676 
   16677 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16678 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16679 	CSR_WRITE_FLUSH(sc);
   16680 	delay(20);
   16683 }
   16684 
   16685 /* special case - for 82575 - need to do manual init ... */
   16686 static void
   16687 wm_reset_init_script_82575(struct wm_softc *sc)
   16688 {
   16689 	/*
   16690 	 * Remark: this is untested code - we have no board without EEPROM.
   16691 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16692 	 */
   16693 
   16694 	/* SerDes configuration via SERDESCTRL */
   16695 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16696 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16697 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16698 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16699 
   16700 	/* CCM configuration via CCMCTL register */
   16701 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16702 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16703 
   16704 	/* PCIe lanes configuration */
   16705 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16706 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16707 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16708 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16709 
   16710 	/* PCIe PLL Configuration */
   16711 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16712 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16713 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16714 }
   16715 
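         /*
          *  wm_reset_mdicnfg_82580 - Restore MDICNFG from the NVM
          *  @sc: pointer to the HW structure
          *
          *  Restore the external/combined MDIO bits of the MDICNFG register
          *  from the NVM after a reset.  Only needed for 82580 parts in
          *  SGMII mode.
          */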
   16716 static void
   16717 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16718 {
   16719 	uint32_t reg;
   16720 	uint16_t nvmword;
   16721 	int rv;
   16722 
   16723 	if (sc->sc_type != WM_T_82580)
   16724 		return;
   16725 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16726 		return;
   16727 
   16728 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16729 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16730 	if (rv != 0) {
   16731 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16732 		    __func__);
   16733 		return;
   16734 	}
   16735 
   16736 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16737 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16738 		reg |= MDICNFG_DEST;
   16739 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16740 		reg |= MDICNFG_COM_MDIO;
   16741 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16742 }
   16743 
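         /* A PHY ID of all zeros or all ones indicates that no PHY responded. */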
   16744 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16745 
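         /*
          *  wm_phy_is_accessible_pchlan - Check if the PHY registers are accessible
          *  @sc: pointer to the HW structure
          *
          *  Read the PHY ID registers to see if the PHY responds; if the ID is
          *  invalid, retry once in MDIO slow mode.  Assumes semaphore already
          *  acquired.
          */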
   16746 static bool
   16747 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16748 {
   16749 	uint32_t reg;
   16750 	uint16_t id1, id2;
   16751 	int i, rv;
   16752 
   16753 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16754 		device_xname(sc->sc_dev), __func__));
   16755 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16756 
   16757 	id1 = id2 = 0xffff;
   16758 	for (i = 0; i < 2; i++) {
   16759 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16760 		    &id1);
   16761 		if ((rv != 0) || MII_INVALIDID(id1))
   16762 			continue;
   16763 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16764 		    &id2);
   16765 		if ((rv != 0) || MII_INVALIDID(id2))
   16766 			continue;
   16767 		break;
   16768 	}
   16769 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16770 		goto out;
   16771 
   16772 	/*
   16773 	 * In case the PHY needs to be in MDIO slow mode,
   16774 	 * set slow mode and try to get the PHY ID again.
   16775 	 */
   16776 	rv = 0;
   16777 	if (sc->sc_type < WM_T_PCH_LPT) {
   16778 		sc->phy.release(sc);
   16779 		wm_set_mdio_slow_mode_hv(sc);
   16780 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16781 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16782 		sc->phy.acquire(sc);
   16783 	}
   16784 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16785 		device_printf(sc->sc_dev, "XXX return with false\n");
   16786 		return false;
   16787 	}
   16788 out:
   16789 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16790 		/* Only unforce SMBus if ME is not active */
   16791 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16792 			uint16_t phyreg;
   16793 
   16794 			/* Unforce SMBus mode in PHY */
   16795 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16796 			    CV_SMB_CTRL, &phyreg);
   16797 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16798 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16799 			    CV_SMB_CTRL, phyreg);
   16800 
   16801 			/* Unforce SMBus mode in MAC */
   16802 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16803 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16804 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16805 		}
   16806 	}
   16807 	return true;
   16808 }
   16809 
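         /*
          *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
          *  @sc: pointer to the HW structure
          *
          *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
          *  one way to recover a PHY that has become inaccessible.
          */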
   16810 static void
   16811 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16812 {
   16813 	uint32_t reg;
   16814 	int i;
   16815 
   16816 	/* Set PHY Config Counter to 50msec */
   16817 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16818 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16819 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16820 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16821 
   16822 	/* Toggle LANPHYPC */
   16823 	reg = CSR_READ(sc, WMREG_CTRL);
   16824 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16825 	reg &= ~CTRL_LANPHYPC_VALUE;
   16826 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16827 	CSR_WRITE_FLUSH(sc);
   16828 	delay(1000);
   16829 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16830 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16831 	CSR_WRITE_FLUSH(sc);
   16832 
   16833 	if (sc->sc_type < WM_T_PCH_LPT)
   16834 		delay(50 * 1000);
   16835 	else {
   16836 		i = 20;
   16837 
   16838 		do {
   16839 			delay(5 * 1000);
   16840 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16841 		    && i--);
   16842 
   16843 		delay(30 * 1000);
   16844 	}
   16845 }
   16846 
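         /*
          *  wm_platform_pm_pch_lpt - Set platform power management values
          *  @sc:   pointer to the HW structure
          *  @link: link up bool flag
          *
          *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
          *  GbE MAC in the Lynx Point PCH based on the Rx buffer size and link
          *  speed, and program the OBFF high water mark accordingly.
          */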
   16847 static int
   16848 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16849 {
   16850 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16851 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16852 	uint32_t rxa;
   16853 	uint16_t scale = 0, lat_enc = 0;
   16854 	int32_t obff_hwm = 0;
   16855 	int64_t lat_ns, value;
   16856 
   16857 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16858 		device_xname(sc->sc_dev), __func__));
   16859 
   16860 	if (link) {
   16861 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16862 		uint32_t status;
   16863 		uint16_t speed;
   16864 		pcireg_t preg;
   16865 
   16866 		status = CSR_READ(sc, WMREG_STATUS);
   16867 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16868 		case STATUS_SPEED_10:
   16869 			speed = 10;
   16870 			break;
   16871 		case STATUS_SPEED_100:
   16872 			speed = 100;
   16873 			break;
   16874 		case STATUS_SPEED_1000:
   16875 			speed = 1000;
   16876 			break;
   16877 		default:
   16878 			device_printf(sc->sc_dev, "Unknown speed "
   16879 			    "(status = %08x)\n", status);
   16880 			return -1;
   16881 		}
   16882 
   16883 		/* Rx Packet Buffer Allocation size (KB) */
   16884 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16885 
   16886 		/*
   16887 		 * Determine the maximum latency tolerated by the device.
   16888 		 *
   16889 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16890 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16891 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16892 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16893 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16894 		 */
   16895 		lat_ns = ((int64_t)rxa * 1024 -
   16896 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16897 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16898 		if (lat_ns < 0)
   16899 			lat_ns = 0;
   16900 		else
   16901 			lat_ns /= speed;
   16902 		value = lat_ns;
   16903 
   16904 		while (value > LTRV_VALUE) {
   16905 			scale++;
   16906 			value = howmany(value, __BIT(5));
   16907 		}
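         		/*
         		 * Example (illustrative values): lat_ns = 100000 ->
         		 * scale 1: howmany(100000, 32) = 3125 (> 1023),
         		 * scale 2: howmany(3125, 32) = 98 (fits), so the encoded
         		 * latency is 98 * 2^10 ns, or roughly 100 us.
         		 */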
   16908 		if (scale > LTRV_SCALE_MAX) {
   16909 			device_printf(sc->sc_dev,
   16910 			    "Invalid LTR latency scale %d\n", scale);
   16911 			return -1;
   16912 		}
   16913 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16914 
   16915 		/* Determine the maximum latency tolerated by the platform */
   16916 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16917 		    WM_PCI_LTR_CAP_LPT);
   16918 		max_snoop = preg & 0xffff;
   16919 		max_nosnoop = preg >> 16;
   16920 
   16921 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16922 
   16923 		if (lat_enc > max_ltr_enc) {
   16924 			lat_enc = max_ltr_enc;
   16925 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16926 			    * PCI_LTR_SCALETONS(
   16927 				    __SHIFTOUT(lat_enc,
   16928 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16929 		}
   16930 
   16931 		if (lat_ns) {
   16932 			lat_ns *= speed * 1000;
   16933 			lat_ns /= 8;
   16934 			lat_ns /= 1000000000;
   16935 			obff_hwm = (int32_t)(rxa - lat_ns);
   16936 		}
   16937 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16938 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16939 			    "(rxa = %d, lat_ns = %d)\n",
   16940 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16941 			return -1;
   16942 		}
   16943 	}
   16944 	/* Snoop and No-Snoop latencies are the same */
   16945 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16946 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16947 
   16948 	/* Set OBFF high water mark */
   16949 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16950 	reg |= obff_hwm;
   16951 	CSR_WRITE(sc, WMREG_SVT, reg);
   16952 
   16953 	/* Enable OBFF */
   16954 	reg = CSR_READ(sc, WMREG_SVCR);
   16955 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16956 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16957 
   16958 	return 0;
   16959 }
   16960 
   16961 /*
   16962  * I210 Errata 25 and I211 Errata 10
   16963  * Slow System Clock.
   16964  *
   16965  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   16966  */
   16967 static int
   16968 wm_pll_workaround_i210(struct wm_softc *sc)
   16969 {
   16970 	uint32_t mdicnfg, wuc;
   16971 	uint32_t reg;
   16972 	pcireg_t pcireg;
   16973 	uint32_t pmreg;
   16974 	uint16_t nvmword, tmp_nvmword;
   16975 	uint16_t phyval;
   16976 	bool wa_done = false;
   16977 	int i, rv = 0;
   16978 
   16979 	/* Get Power Management cap offset */
   16980 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16981 	    &pmreg, NULL) == 0)
   16982 		return -1;
   16983 
   16984 	/* Save WUC and MDICNFG registers */
   16985 	wuc = CSR_READ(sc, WMREG_WUC);
   16986 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16987 
   16988 	reg = mdicnfg & ~MDICNFG_DEST;
   16989 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16990 
   16991 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16992 		/*
   16993 		 * The default value of the Initialization Control Word 1
   16994 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16995 		 */
   16996 		nvmword = INVM_DEFAULT_AL;
   16997 	}
   16998 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16999 
   17000 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17001 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17002 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17003 
   17004 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17005 			rv = 0;
   17006 			break; /* OK */
   17007 		} else
   17008 			rv = -1;
   17009 
   17010 		wa_done = true;
   17011 		/* Directly reset the internal PHY */
   17012 		reg = CSR_READ(sc, WMREG_CTRL);
   17013 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17014 
   17015 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17016 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17017 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17018 
   17019 		CSR_WRITE(sc, WMREG_WUC, 0);
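         		/*
         		 * Load the autoload word with the PLL workaround bits
         		 * set while the PHY is power-cycled through D3hot below;
         		 * the original word is restored afterwards (errata flow).
         		 */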
   17020 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17021 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17022 
   17023 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17024 		    pmreg + PCI_PMCSR);
   17025 		pcireg |= PCI_PMCSR_STATE_D3;
   17026 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17027 		    pmreg + PCI_PMCSR, pcireg);
   17028 		delay(1000);
   17029 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17030 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17031 		    pmreg + PCI_PMCSR, pcireg);
   17032 
   17033 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17034 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17035 
   17036 		/* Restore WUC register */
   17037 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17038 	}
   17039 
   17040 	/* Restore MDICNFG setting */
   17041 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17042 	if (wa_done)
   17043 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17044 	return rv;
   17045 }
   17046 
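         /*
          *  wm_legacy_irq_quirk_spt - Quirk for legacy (INTx) interrupts
          *  @sc: pointer to the HW structure
          *
          *  Ungate the side clock and disable IOSF-SB clock gating and clock
          *  request; without this, legacy interrupts may not be delivered on
          *  SPT/CNP.
          */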
   17047 static void
   17048 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17049 {
   17050 	uint32_t reg;
   17051 
   17052 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17053 		device_xname(sc->sc_dev), __func__));
   17054 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17055 	    || (sc->sc_type == WM_T_PCH_CNP));
   17056 
   17057 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17058 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17059 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17060 
   17061 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17062 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17063 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17064 }
   17065 
   17066 /* Sysctl function */
   17067 #ifdef WM_DEBUG
   17068 static int
   17069 wm_sysctl_debug(SYSCTLFN_ARGS)
   17070 {
   17071 	struct sysctlnode node = *rnode;
   17072 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17073 	uint32_t dflags;
   17074 	int error;
   17075 
   17076 	dflags = sc->sc_debug;
   17077 	node.sysctl_data = &dflags;
   17078 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17079 
   17080 	if (error || newp == NULL)
   17081 		return error;
   17082 
   17083 	sc->sc_debug = dflags;
   17084 
   17085 	return 0;
   17086 }
   17087 #endif
   17088