/*	$NetBSD: if_wm.c,v 1.681 2020/07/09 06:42:44 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.681 2020/07/09 06:42:44 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
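
/*
 * Usage example (illustrative only, not driver code):
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n", ifname));
 *
 * The printf arguments take an extra set of parentheses so that the
 * whole statement compiles away when WM_DEBUG is not defined.
 */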

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * Maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
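
/*
 * Illustrative sketch (not part of the driver): because the descriptor
 * and job counts above are powers of two, WM_NEXTTX() and friends wrap
 * an index with a simple mask, e.g. with 256 descriptors
 * (255 + 1) & 255 == 0.  A hypothetical helper showing the same idiom:
 */
#if 0
static inline int
wm_example_nextidx(int x, int ndesc)
{

	/* ndesc must be a power of two, as txq_ndesc always is. */
	return (x + 1) & (ndesc - 1);
}
#endif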

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
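
/*
 * Worked example of the sizing above (illustrative; assumes a 9018-byte
 * jumbo frame): howmany(9018, MCLBYTES) = 5 clusters per frame, so the
 * 256-descriptor ring has room for roughly 256 / 5 = ~50 such frames.
 */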

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
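
/*
 * Note (added for clarity): txq_descsize/rxq_descsize record which of
 * the union layouts above is in use, so WM_CDTXOFF()/WM_CDRXOFF()
 * yield byte offsets that are correct for either descriptor format.
 */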

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
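
/*
 * Illustrative expansion (added for clarity): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is not token-pasted; it merely sizes a buffer
 * large enough for any formatted name), and
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) snprintf()s
 * "txq00txdw" into that buffer and attaches txq_ev_txdw with
 * evcnt_attach_dynamic().
 */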

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
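
/*
 * Note (added for clarity): rxq_tailp always points at the last m_next
 * pointer in the chain (or at rxq_head when the chain is empty), so
 * WM_RXCHAIN_LINK() appends an mbuf in O(1) without walking the chain.
 */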

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
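
/*
 * Note (added for clarity): reading any CSR flushes posted writes on
 * the bus; STATUS is used here because reading it has no side effects.
 */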

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
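
/*
 * Note (added for clarity): the _LO/_HI pairs split a descriptor ring
 * base address into the two 32-bit halves expected by the hardware's
 * base-address register pairs (e.g. TDBAL/TDBAH for Tx); with a 32-bit
 * bus_addr_t the high half is simply 0.
 */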

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1327 	  "82801I (C) LAN Controller",
   1328 	  WM_T_ICH9,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1330 	  "82801I mobile LAN Controller",
   1331 	  WM_T_ICH9,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1333 	  "82801I mobile (V) LAN Controller",
   1334 	  WM_T_ICH9,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1336 	  "82801I mobile (AMT) LAN Controller",
   1337 	  WM_T_ICH9,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1339 	  "82567LM-4 LAN Controller",
   1340 	  WM_T_ICH9,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1342 	  "82567LM-2 LAN Controller",
   1343 	  WM_T_ICH10,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1345 	  "82567LF-2 LAN Controller",
   1346 	  WM_T_ICH10,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1348 	  "82567LM-3 LAN Controller",
   1349 	  WM_T_ICH10,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1351 	  "82567LF-3 LAN Controller",
   1352 	  WM_T_ICH10,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1354 	  "82567V-2 LAN Controller",
   1355 	  WM_T_ICH10,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1357 	  "82567V-3? LAN Controller",
   1358 	  WM_T_ICH10,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1360 	  "HANKSVILLE LAN Controller",
   1361 	  WM_T_ICH10,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1363 	  "PCH LAN (82577LM) Controller",
   1364 	  WM_T_PCH,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1366 	  "PCH LAN (82577LC) Controller",
   1367 	  WM_T_PCH,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1369 	  "PCH LAN (82578DM) Controller",
   1370 	  WM_T_PCH,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1372 	  "PCH LAN (82578DC) Controller",
   1373 	  WM_T_PCH,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1375 	  "PCH2 LAN (82579LM) Controller",
   1376 	  WM_T_PCH2,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1378 	  "PCH2 LAN (82579V) Controller",
   1379 	  WM_T_PCH2,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1381 	  "82575EB dual-1000baseT Ethernet",
   1382 	  WM_T_82575,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1384 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1385 	  WM_T_82575,		WMP_F_SERDES },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1387 	  "82575GB quad-1000baseT Ethernet",
   1388 	  WM_T_82575,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1390 	  "82575GB quad-1000baseT Ethernet (PM)",
   1391 	  WM_T_82575,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1393 	  "82576 1000BaseT Ethernet",
   1394 	  WM_T_82576,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1396 	  "82576 1000BaseX Ethernet",
   1397 	  WM_T_82576,		WMP_F_FIBER },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1400 	  "82576 gigabit Ethernet (SERDES)",
   1401 	  WM_T_82576,		WMP_F_SERDES },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1404 	  "82576 quad-1000BaseT Ethernet",
   1405 	  WM_T_82576,		WMP_F_COPPER },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1408 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1409 	  WM_T_82576,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1412 	  "82576 gigabit Ethernet",
   1413 	  WM_T_82576,		WMP_F_COPPER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1416 	  "82576 gigabit Ethernet (SERDES)",
   1417 	  WM_T_82576,		WMP_F_SERDES },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1419 	  "82576 quad-gigabit Ethernet (SERDES)",
   1420 	  WM_T_82576,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1423 	  "82580 1000BaseT Ethernet",
   1424 	  WM_T_82580,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1426 	  "82580 1000BaseX Ethernet",
   1427 	  WM_T_82580,		WMP_F_FIBER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1430 	  "82580 1000BaseT Ethernet (SERDES)",
   1431 	  WM_T_82580,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1434 	  "82580 gigabit Ethernet (SGMII)",
   1435 	  WM_T_82580,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1437 	  "82580 dual-1000BaseT Ethernet",
   1438 	  WM_T_82580,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1441 	  "82580 quad-1000BaseX Ethernet",
   1442 	  WM_T_82580,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1445 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1446 	  WM_T_82580,		WMP_F_COPPER },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1449 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1450 	  WM_T_82580,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1453 	  "DH89XXCC 1000BASE-KX Ethernet",
   1454 	  WM_T_82580,		WMP_F_SERDES },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1457 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1458 	  WM_T_82580,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1461 	  "I350 Gigabit Network Connection",
   1462 	  WM_T_I350,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1465 	  "I350 Gigabit Fiber Network Connection",
   1466 	  WM_T_I350,		WMP_F_FIBER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1469 	  "I350 Gigabit Backplane Connection",
   1470 	  WM_T_I350,		WMP_F_SERDES },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1473 	  "I350 Quad Port Gigabit Ethernet",
   1474 	  WM_T_I350,		WMP_F_SERDES },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1477 	  "I350 Gigabit Connection",
   1478 	  WM_T_I350,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1481 	  "I354 Gigabit Ethernet (KX)",
   1482 	  WM_T_I354,		WMP_F_SERDES },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1485 	  "I354 Gigabit Ethernet (SGMII)",
   1486 	  WM_T_I354,		WMP_F_COPPER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1489 	  "I354 Gigabit Ethernet (2.5G)",
   1490 	  WM_T_I354,		WMP_F_COPPER },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1493 	  "I210-T1 Ethernet Server Adapter",
   1494 	  WM_T_I210,		WMP_F_COPPER },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1497 	  "I210 Ethernet (Copper OEM)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1501 	  "I210 Ethernet (Copper IT)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1505 	  "I210 Ethernet (Copper, FLASH less)",
   1506 	  WM_T_I210,		WMP_F_COPPER },
   1507 
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1509 	  "I210 Gigabit Ethernet (Fiber)",
   1510 	  WM_T_I210,		WMP_F_FIBER },
   1511 
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1513 	  "I210 Gigabit Ethernet (SERDES)",
   1514 	  WM_T_I210,		WMP_F_SERDES },
   1515 
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1517 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1518 	  WM_T_I210,		WMP_F_SERDES },
   1519 
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1521 	  "I210 Gigabit Ethernet (SGMII)",
   1522 	  WM_T_I210,		WMP_F_COPPER },
   1523 
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1525 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1526 	  WM_T_I210,		WMP_F_COPPER },
   1527 
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1529 	  "I211 Ethernet (COPPER)",
   1530 	  WM_T_I211,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1532 	  "I217 V Ethernet Connection",
   1533 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1535 	  "I217 LM Ethernet Connection",
   1536 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1538 	  "I218 V Ethernet Connection",
   1539 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1541 	  "I218 V Ethernet Connection",
   1542 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1544 	  "I218 V Ethernet Connection",
   1545 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1547 	  "I218 LM Ethernet Connection",
   1548 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1550 	  "I218 LM Ethernet Connection",
   1551 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1553 	  "I218 LM Ethernet Connection",
   1554 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1568 	  "I219 LM Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1571 	  "I219 LM Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1574 	  "I219 LM Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1577 	  "I219 LM Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1580 	  "I219 LM Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1583 	  "I219 LM Ethernet Connection",
   1584 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1586 	  "I219 LM Ethernet Connection",
   1587 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1589 	  "I219 LM Ethernet Connection",
   1590 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1592 	  "I219 LM Ethernet Connection",
   1593 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1595 	  "I219 LM Ethernet Connection",
   1596 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1598 	  "I219 LM Ethernet Connection",
   1599 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1601 	  "I219 V Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1604 	  "I219 V Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1607 	  "I219 V Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1610 	  "I219 V Ethernet Connection",
   1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1613 	  "I219 V Ethernet Connection",
   1614 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1616 	  "I219 V Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1619 	  "I219 V Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1622 	  "I219 V Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1625 	  "I219 V Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1628 	  "I219 V Ethernet Connection",
   1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1631 	  "I219 V Ethernet Connection",
   1632 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1634 	  "I219 V Ethernet Connection",
   1635 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1637 	  "I219 V Ethernet Connection",
   1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1639 	{ 0,			0,
   1640 	  NULL,
   1641 	  0,			0 },
   1642 };
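
/*
 * The table above ends with an all-zero sentinel entry; wm_lookup()
 * below scans it until it reaches a NULL wmp_name.
 */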
   1643 
   1644 /*
   1645  * Register read/write functions.
   1646  * Other than CSR_{READ|WRITE}().
   1647  */
   1648 
   1649 #if 0 /* Not currently used */
   1650 static inline uint32_t
   1651 wm_io_read(struct wm_softc *sc, int reg)
   1652 {
   1653 
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1655 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1656 }
   1657 #endif
   1658 
   1659 static inline void
   1660 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1661 {
   1662 
   1663 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1664 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1665 }
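
/*
 * Both helpers above use the chip's indirect I/O window: the target
 * register offset is first written at I/O offset 0 (the address
 * register), and the data is then transferred at I/O offset 4 (the
 * data register).  So, roughly speaking, wm_io_write(sc, reg, val)
 * has the same effect as the memory-mapped CSR_WRITE(sc, reg, val),
 * only through I/O space.
 */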
   1666 
   1667 static inline void
   1668 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1669     uint32_t data)
   1670 {
   1671 	uint32_t regval;
   1672 	int i;
   1673 
   1674 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1675 
   1676 	CSR_WRITE(sc, reg, regval);
   1677 
   1678 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1679 		delay(5);
   1680 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1681 			break;
   1682 	}
   1683 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1684 		aprint_error("%s: WARNING:"
   1685 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1686 		    device_xname(sc->sc_dev), reg);
   1687 	}
   1688 }
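
/*
 * A worked example of the packing above (values purely illustrative):
 * with off = 0x02 and data = 0x5a, regval carries the 8-bit payload in
 * its low bits (masked by SCTL_CTL_DATA_MASK) and the sub-register
 * address at SCTL_CTL_ADDR_SHIFT.  The poll loop then waits up to
 * SCTL_CTL_POLL_TIMEOUT iterations of 5us each for the hardware to
 * acknowledge the write by setting SCTL_CTL_READY.
 */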
   1689 
   1690 static inline void
   1691 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1692 {
   1693 	wa->wa_low = htole32(v & 0xffffffffU);
   1694 	if (sizeof(bus_addr_t) == 8)
   1695 		wa->wa_high = htole32((uint64_t) v >> 32);
   1696 	else
   1697 		wa->wa_high = 0;
   1698 }
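
/*
 * Example: with a 64-bit bus_addr_t, v = 0x123456789 is stored as
 * wa_low = htole32(0x23456789) and wa_high = htole32(0x1); with a
 * 32-bit bus_addr_t the high word is simply 0.  The htole32()
 * conversions keep the descriptor fields little-endian regardless of
 * host byte order, which is what the hardware expects.
 */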
   1699 
   1700 /*
   1701  * Descriptor sync/init functions.
   1702  */
   1703 static inline void
   1704 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1705 {
   1706 	struct wm_softc *sc = txq->txq_sc;
   1707 
   1708 	/* If it will wrap around, sync to the end of the ring. */
   1709 	if ((start + num) > WM_NTXDESC(txq)) {
   1710 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1711 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1712 		    (WM_NTXDESC(txq) - start), ops);
   1713 		num -= (WM_NTXDESC(txq) - start);
   1714 		start = 0;
   1715 	}
   1716 
   1717 	/* Now sync whatever is left. */
   1718 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1719 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1720 }
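
/*
 * Wrap-around example for wm_cdtxsync(): on a 256-descriptor ring,
 * start = 250 and num = 10 first syncs the six descriptors 250..255,
 * then resets start to 0 and syncs the remaining four descriptors
 * 0..3.
 */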
   1721 
   1722 static inline void
   1723 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1724 {
   1725 	struct wm_softc *sc = rxq->rxq_sc;
   1726 
   1727 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1728 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1729 }
   1730 
   1731 static inline void
   1732 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1733 {
   1734 	struct wm_softc *sc = rxq->rxq_sc;
   1735 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1736 	struct mbuf *m = rxs->rxs_mbuf;
   1737 
   1738 	/*
   1739 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1740 	 * so that the payload after the Ethernet header is aligned
   1741 	 * to a 4-byte boundary.
    1742 	 *
   1743 	 * XXX BRAINDAMAGE ALERT!
   1744 	 * The stupid chip uses the same size for every buffer, which
   1745 	 * is set in the Receive Control register.  We are using the 2K
   1746 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1747 	 * reason, we can't "scoot" packets longer than the standard
   1748 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1749 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1750 	 * the upper layer copy the headers.
   1751 	 */
   1752 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1753 
   1754 	if (sc->sc_type == WM_T_82574) {
   1755 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1756 		rxd->erx_data.erxd_addr =
   1757 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1758 		rxd->erx_data.erxd_dd = 0;
   1759 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1760 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1761 
   1762 		rxd->nqrx_data.nrxd_paddr =
   1763 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1764 		/* Currently, split header is not supported. */
   1765 		rxd->nqrx_data.nrxd_haddr = 0;
   1766 	} else {
   1767 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1768 
   1769 		wm_set_dma_addr(&rxd->wrx_addr,
   1770 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1771 		rxd->wrx_len = 0;
   1772 		rxd->wrx_cksum = 0;
   1773 		rxd->wrx_status = 0;
   1774 		rxd->wrx_errors = 0;
   1775 		rxd->wrx_special = 0;
   1776 	}
   1777 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1778 
   1779 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1780 }
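
/*
 * The 2-byte "scoot" in wm_init_rxdesc() works because an Ethernet
 * header is 14 bytes long: starting it at offset 2 makes the payload
 * after the header begin at offset 16, a 4-byte aligned address.
 */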
   1781 
   1782 /*
   1783  * Device driver interface functions and commonly used functions.
    1784  * Match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1785  */
   1786 
   1787 /* Lookup supported device table */
   1788 static const struct wm_product *
   1789 wm_lookup(const struct pci_attach_args *pa)
   1790 {
   1791 	const struct wm_product *wmp;
   1792 
   1793 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1794 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1795 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1796 			return wmp;
   1797 	}
   1798 	return NULL;
   1799 }
   1800 
   1801 /* The match function (ca_match) */
   1802 static int
   1803 wm_match(device_t parent, cfdata_t cf, void *aux)
   1804 {
   1805 	struct pci_attach_args *pa = aux;
   1806 
   1807 	if (wm_lookup(pa) != NULL)
   1808 		return 1;
   1809 
   1810 	return 0;
   1811 }
   1812 
   1813 /* The attach function (ca_attach) */
   1814 static void
   1815 wm_attach(device_t parent, device_t self, void *aux)
   1816 {
   1817 	struct wm_softc *sc = device_private(self);
   1818 	struct pci_attach_args *pa = aux;
   1819 	prop_dictionary_t dict;
   1820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1821 	pci_chipset_tag_t pc = pa->pa_pc;
   1822 	int counts[PCI_INTR_TYPE_SIZE];
   1823 	pci_intr_type_t max_type;
   1824 	const char *eetype, *xname;
   1825 	bus_space_tag_t memt;
   1826 	bus_space_handle_t memh;
   1827 	bus_size_t memsize;
   1828 	int memh_valid;
   1829 	int i, error;
   1830 	const struct wm_product *wmp;
   1831 	prop_data_t ea;
   1832 	prop_number_t pn;
   1833 	uint8_t enaddr[ETHER_ADDR_LEN];
   1834 	char buf[256];
   1835 	char wqname[MAXCOMLEN];
   1836 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1837 	pcireg_t preg, memtype;
   1838 	uint16_t eeprom_data, apme_mask;
   1839 	bool force_clear_smbi;
   1840 	uint32_t link_mode;
   1841 	uint32_t reg;
   1842 
   1843 	sc->sc_dev = self;
   1844 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1845 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1846 	sc->sc_core_stopping = false;
   1847 
   1848 	wmp = wm_lookup(pa);
   1849 #ifdef DIAGNOSTIC
   1850 	if (wmp == NULL) {
   1851 		printf("\n");
   1852 		panic("wm_attach: impossible");
   1853 	}
   1854 #endif
   1855 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1856 
   1857 	sc->sc_pc = pa->pa_pc;
   1858 	sc->sc_pcitag = pa->pa_tag;
   1859 
   1860 	if (pci_dma64_available(pa))
   1861 		sc->sc_dmat = pa->pa_dmat64;
   1862 	else
   1863 		sc->sc_dmat = pa->pa_dmat;
   1864 
   1865 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1866 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1867 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1868 
   1869 	sc->sc_type = wmp->wmp_type;
   1870 
   1871 	/* Set default function pointers */
   1872 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1873 	sc->phy.release = sc->nvm.release = wm_put_null;
   1874 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1875 
   1876 	if (sc->sc_type < WM_T_82543) {
   1877 		if (sc->sc_rev < 2) {
   1878 			aprint_error_dev(sc->sc_dev,
   1879 			    "i82542 must be at least rev. 2\n");
   1880 			return;
   1881 		}
   1882 		if (sc->sc_rev < 3)
   1883 			sc->sc_type = WM_T_82542_2_0;
   1884 	}
   1885 
   1886 	/*
   1887 	 * Disable MSI for Errata:
   1888 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1889 	 *
   1890 	 *  82544: Errata 25
   1891 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1892 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1893 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1894 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1895 	 *
   1896 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1897 	 *
   1898 	 *  82571 & 82572: Errata 63
   1899 	 */
   1900 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1901 	    || (sc->sc_type == WM_T_82572))
   1902 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1903 
   1904 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1905 	    || (sc->sc_type == WM_T_82580)
   1906 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1907 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1908 		sc->sc_flags |= WM_F_NEWQUEUE;
   1909 
   1910 	/* Set device properties (mactype) */
   1911 	dict = device_properties(sc->sc_dev);
   1912 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1913 
   1914 	/*
    1915 	 * Map the device.  All devices support memory-mapped access,
   1916 	 * and it is really required for normal operation.
   1917 	 */
   1918 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1919 	switch (memtype) {
   1920 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1921 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1922 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1923 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1924 		break;
   1925 	default:
   1926 		memh_valid = 0;
   1927 		break;
   1928 	}
   1929 
   1930 	if (memh_valid) {
   1931 		sc->sc_st = memt;
   1932 		sc->sc_sh = memh;
   1933 		sc->sc_ss = memsize;
   1934 	} else {
   1935 		aprint_error_dev(sc->sc_dev,
   1936 		    "unable to map device registers\n");
   1937 		return;
   1938 	}
   1939 
   1940 	/*
   1941 	 * In addition, i82544 and later support I/O mapped indirect
   1942 	 * register access.  It is not desirable (nor supported in
   1943 	 * this driver) to use it for normal operation, though it is
   1944 	 * required to work around bugs in some chip versions.
   1945 	 */
   1946 	if (sc->sc_type >= WM_T_82544) {
   1947 		/* First we have to find the I/O BAR. */
   1948 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1949 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1950 			if (memtype == PCI_MAPREG_TYPE_IO)
   1951 				break;
   1952 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1953 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1954 				i += 4;	/* skip high bits, too */
   1955 		}
   1956 		if (i < PCI_MAPREG_END) {
   1957 			/*
    1958 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1959 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1960 			 * that's no problem, because the newer chips don't
    1961 			 * have this bug.
   1962 			 *
    1963 			 * The i8254x apparently doesn't respond when the
    1964 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1965 			 * been configured.
   1966 			 */
   1967 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1968 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1969 				aprint_error_dev(sc->sc_dev,
   1970 				    "WARNING: I/O BAR at zero.\n");
   1971 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1972 					0, &sc->sc_iot, &sc->sc_ioh,
   1973 					NULL, &sc->sc_ios) == 0) {
   1974 				sc->sc_flags |= WM_F_IOH_VALID;
   1975 			} else
   1976 				aprint_error_dev(sc->sc_dev,
   1977 				    "WARNING: unable to map I/O space\n");
   1978 		}
   1979 
   1980 	}
   1981 
   1982 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1983 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1984 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1985 	if (sc->sc_type < WM_T_82542_2_1)
   1986 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1987 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1988 
   1989 	/* Power up chip */
   1990 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1991 	    && error != EOPNOTSUPP) {
   1992 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1993 		return;
   1994 	}
   1995 
   1996 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1997 	/*
    1998 	 * Don't use MSI-X if we can use only one queue; this saves
    1999 	 * an interrupt resource.
   2000 	 */
   2001 	if (sc->sc_nqueues > 1) {
   2002 		max_type = PCI_INTR_TYPE_MSIX;
   2003 		/*
    2004 		 * The 82583 has an MSI-X capability in the PCI config
    2005 		 * space but doesn't actually support it; at least the
    2006 		 * documentation doesn't say anything about MSI-X.
   2007 		 */
   2008 		counts[PCI_INTR_TYPE_MSIX]
   2009 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2010 	} else {
   2011 		max_type = PCI_INTR_TYPE_MSI;
   2012 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2013 	}
   2014 
   2015 	/* Allocation settings */
   2016 	counts[PCI_INTR_TYPE_MSI] = 1;
   2017 	counts[PCI_INTR_TYPE_INTX] = 1;
   2018 	/* overridden by disable flags */
   2019 	if (wm_disable_msi != 0) {
   2020 		counts[PCI_INTR_TYPE_MSI] = 0;
   2021 		if (wm_disable_msix != 0) {
   2022 			max_type = PCI_INTR_TYPE_INTX;
   2023 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2024 		}
   2025 	} else if (wm_disable_msix != 0) {
   2026 		max_type = PCI_INTR_TYPE_MSI;
   2027 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2028 	}
   2029 
   2030 alloc_retry:
   2031 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2032 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2033 		return;
   2034 	}
   2035 
   2036 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2037 		error = wm_setup_msix(sc);
   2038 		if (error) {
   2039 			pci_intr_release(pc, sc->sc_intrs,
   2040 			    counts[PCI_INTR_TYPE_MSIX]);
   2041 
   2042 			/* Setup for MSI: Disable MSI-X */
   2043 			max_type = PCI_INTR_TYPE_MSI;
   2044 			counts[PCI_INTR_TYPE_MSI] = 1;
   2045 			counts[PCI_INTR_TYPE_INTX] = 1;
   2046 			goto alloc_retry;
   2047 		}
   2048 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2049 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2050 		error = wm_setup_legacy(sc);
   2051 		if (error) {
   2052 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2053 			    counts[PCI_INTR_TYPE_MSI]);
   2054 
   2055 			/* The next try is for INTx: Disable MSI */
   2056 			max_type = PCI_INTR_TYPE_INTX;
   2057 			counts[PCI_INTR_TYPE_INTX] = 1;
   2058 			goto alloc_retry;
   2059 		}
   2060 	} else {
   2061 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2062 		error = wm_setup_legacy(sc);
   2063 		if (error) {
   2064 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2065 			    counts[PCI_INTR_TYPE_INTX]);
   2066 			return;
   2067 		}
   2068 	}
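
	/*
	 * At this point the interrupt type has been settled by the retry
	 * chain above: MSI-X is tried first (when more than one queue is
	 * usable), then MSI, then plain INTx as the last resort.
	 */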
   2069 
   2070 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2071 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2072 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2073 	    WM_WORKQUEUE_FLAGS);
   2074 	if (error) {
   2075 		aprint_error_dev(sc->sc_dev,
   2076 		    "unable to create workqueue\n");
   2077 		goto out;
   2078 	}
   2079 
   2080 	/*
   2081 	 * Check the function ID (unit number of the chip).
   2082 	 */
   2083 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2084 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2085 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2086 	    || (sc->sc_type == WM_T_82580)
   2087 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2088 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2089 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2090 	else
   2091 		sc->sc_funcid = 0;
   2092 
   2093 	/*
   2094 	 * Determine a few things about the bus we're connected to.
   2095 	 */
   2096 	if (sc->sc_type < WM_T_82543) {
   2097 		/* We don't really know the bus characteristics here. */
   2098 		sc->sc_bus_speed = 33;
   2099 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2100 		/*
    2101 		 * CSA (Communication Streaming Architecture) is about as
    2102 		 * fast as a 32-bit 66MHz PCI bus.
   2103 		 */
   2104 		sc->sc_flags |= WM_F_CSA;
   2105 		sc->sc_bus_speed = 66;
   2106 		aprint_verbose_dev(sc->sc_dev,
   2107 		    "Communication Streaming Architecture\n");
   2108 		if (sc->sc_type == WM_T_82547) {
   2109 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2110 			callout_setfunc(&sc->sc_txfifo_ch,
   2111 			    wm_82547_txfifo_stall, sc);
   2112 			aprint_verbose_dev(sc->sc_dev,
   2113 			    "using 82547 Tx FIFO stall work-around\n");
   2114 		}
   2115 	} else if (sc->sc_type >= WM_T_82571) {
   2116 		sc->sc_flags |= WM_F_PCIE;
   2117 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2118 		    && (sc->sc_type != WM_T_ICH10)
   2119 		    && (sc->sc_type != WM_T_PCH)
   2120 		    && (sc->sc_type != WM_T_PCH2)
   2121 		    && (sc->sc_type != WM_T_PCH_LPT)
   2122 		    && (sc->sc_type != WM_T_PCH_SPT)
   2123 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2124 			/* ICH* and PCH* have no PCIe capability registers */
   2125 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2126 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2127 				NULL) == 0)
   2128 				aprint_error_dev(sc->sc_dev,
   2129 				    "unable to find PCIe capability\n");
   2130 		}
   2131 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2132 	} else {
   2133 		reg = CSR_READ(sc, WMREG_STATUS);
   2134 		if (reg & STATUS_BUS64)
   2135 			sc->sc_flags |= WM_F_BUS64;
   2136 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2137 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2138 
   2139 			sc->sc_flags |= WM_F_PCIX;
   2140 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2141 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2142 				aprint_error_dev(sc->sc_dev,
   2143 				    "unable to find PCIX capability\n");
   2144 			else if (sc->sc_type != WM_T_82545_3 &&
   2145 				 sc->sc_type != WM_T_82546_3) {
   2146 				/*
   2147 				 * Work around a problem caused by the BIOS
   2148 				 * setting the max memory read byte count
   2149 				 * incorrectly.
   2150 				 */
   2151 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2152 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2153 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2154 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2155 
   2156 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2157 				    PCIX_CMD_BYTECNT_SHIFT;
   2158 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2159 				    PCIX_STATUS_MAXB_SHIFT;
   2160 				if (bytecnt > maxb) {
   2161 					aprint_verbose_dev(sc->sc_dev,
   2162 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2163 					    512 << bytecnt, 512 << maxb);
   2164 					pcix_cmd = (pcix_cmd &
   2165 					    ~PCIX_CMD_BYTECNT_MASK) |
   2166 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2167 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2168 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2169 					    pcix_cmd);
   2170 				}
   2171 			}
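			/*
			 * The MMRBC field encodes the byte count as a
			 * power of two: 0 -> 512, 1 -> 1024, 2 -> 2048
			 * and 3 -> 4096 bytes, hence the "512 << bytecnt"
			 * in the message above.
			 */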
   2172 		}
   2173 		/*
   2174 		 * The quad port adapter is special; it has a PCIX-PCIX
   2175 		 * bridge on the board, and can run the secondary bus at
   2176 		 * a higher speed.
   2177 		 */
   2178 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2179 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2180 								      : 66;
   2181 		} else if (sc->sc_flags & WM_F_PCIX) {
   2182 			switch (reg & STATUS_PCIXSPD_MASK) {
   2183 			case STATUS_PCIXSPD_50_66:
   2184 				sc->sc_bus_speed = 66;
   2185 				break;
   2186 			case STATUS_PCIXSPD_66_100:
   2187 				sc->sc_bus_speed = 100;
   2188 				break;
   2189 			case STATUS_PCIXSPD_100_133:
   2190 				sc->sc_bus_speed = 133;
   2191 				break;
   2192 			default:
   2193 				aprint_error_dev(sc->sc_dev,
   2194 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2195 				    reg & STATUS_PCIXSPD_MASK);
   2196 				sc->sc_bus_speed = 66;
   2197 				break;
   2198 			}
   2199 		} else
   2200 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2201 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2202 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2203 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2204 	}
   2205 
   2206 	/* clear interesting stat counters */
   2207 	CSR_READ(sc, WMREG_COLC);
   2208 	CSR_READ(sc, WMREG_RXERRC);
   2209 
   2210 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2211 	    || (sc->sc_type >= WM_T_ICH8))
   2212 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2213 	if (sc->sc_type >= WM_T_ICH8)
   2214 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2215 
   2216 	/* Set PHY, NVM mutex related stuff */
   2217 	switch (sc->sc_type) {
   2218 	case WM_T_82542_2_0:
   2219 	case WM_T_82542_2_1:
   2220 	case WM_T_82543:
   2221 	case WM_T_82544:
   2222 		/* Microwire */
   2223 		sc->nvm.read = wm_nvm_read_uwire;
   2224 		sc->sc_nvm_wordsize = 64;
   2225 		sc->sc_nvm_addrbits = 6;
   2226 		break;
   2227 	case WM_T_82540:
   2228 	case WM_T_82545:
   2229 	case WM_T_82545_3:
   2230 	case WM_T_82546:
   2231 	case WM_T_82546_3:
   2232 		/* Microwire */
   2233 		sc->nvm.read = wm_nvm_read_uwire;
   2234 		reg = CSR_READ(sc, WMREG_EECD);
   2235 		if (reg & EECD_EE_SIZE) {
   2236 			sc->sc_nvm_wordsize = 256;
   2237 			sc->sc_nvm_addrbits = 8;
   2238 		} else {
   2239 			sc->sc_nvm_wordsize = 64;
   2240 			sc->sc_nvm_addrbits = 6;
   2241 		}
   2242 		sc->sc_flags |= WM_F_LOCK_EECD;
   2243 		sc->nvm.acquire = wm_get_eecd;
   2244 		sc->nvm.release = wm_put_eecd;
   2245 		break;
   2246 	case WM_T_82541:
   2247 	case WM_T_82541_2:
   2248 	case WM_T_82547:
   2249 	case WM_T_82547_2:
   2250 		reg = CSR_READ(sc, WMREG_EECD);
   2251 		/*
    2252 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2253 		 * 8254[17], so set the flags and functions before calling it.
   2254 		 */
   2255 		sc->sc_flags |= WM_F_LOCK_EECD;
   2256 		sc->nvm.acquire = wm_get_eecd;
   2257 		sc->nvm.release = wm_put_eecd;
   2258 		if (reg & EECD_EE_TYPE) {
   2259 			/* SPI */
   2260 			sc->nvm.read = wm_nvm_read_spi;
   2261 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2262 			wm_nvm_set_addrbits_size_eecd(sc);
   2263 		} else {
   2264 			/* Microwire */
   2265 			sc->nvm.read = wm_nvm_read_uwire;
   2266 			if ((reg & EECD_EE_ABITS) != 0) {
   2267 				sc->sc_nvm_wordsize = 256;
   2268 				sc->sc_nvm_addrbits = 8;
   2269 			} else {
   2270 				sc->sc_nvm_wordsize = 64;
   2271 				sc->sc_nvm_addrbits = 6;
   2272 			}
   2273 		}
   2274 		break;
   2275 	case WM_T_82571:
   2276 	case WM_T_82572:
   2277 		/* SPI */
   2278 		sc->nvm.read = wm_nvm_read_eerd;
    2279 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2280 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2281 		wm_nvm_set_addrbits_size_eecd(sc);
   2282 		sc->phy.acquire = wm_get_swsm_semaphore;
   2283 		sc->phy.release = wm_put_swsm_semaphore;
   2284 		sc->nvm.acquire = wm_get_nvm_82571;
   2285 		sc->nvm.release = wm_put_nvm_82571;
   2286 		break;
   2287 	case WM_T_82573:
   2288 	case WM_T_82574:
   2289 	case WM_T_82583:
   2290 		sc->nvm.read = wm_nvm_read_eerd;
    2291 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2292 		if (sc->sc_type == WM_T_82573) {
   2293 			sc->phy.acquire = wm_get_swsm_semaphore;
   2294 			sc->phy.release = wm_put_swsm_semaphore;
   2295 			sc->nvm.acquire = wm_get_nvm_82571;
   2296 			sc->nvm.release = wm_put_nvm_82571;
   2297 		} else {
   2298 			/* Both PHY and NVM use the same semaphore. */
   2299 			sc->phy.acquire = sc->nvm.acquire
   2300 			    = wm_get_swfwhw_semaphore;
   2301 			sc->phy.release = sc->nvm.release
   2302 			    = wm_put_swfwhw_semaphore;
   2303 		}
   2304 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2305 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2306 			sc->sc_nvm_wordsize = 2048;
   2307 		} else {
   2308 			/* SPI */
   2309 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2310 			wm_nvm_set_addrbits_size_eecd(sc);
   2311 		}
   2312 		break;
   2313 	case WM_T_82575:
   2314 	case WM_T_82576:
   2315 	case WM_T_82580:
   2316 	case WM_T_I350:
   2317 	case WM_T_I354:
   2318 	case WM_T_80003:
   2319 		/* SPI */
   2320 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2321 		wm_nvm_set_addrbits_size_eecd(sc);
   2322 		if ((sc->sc_type == WM_T_80003)
   2323 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2324 			sc->nvm.read = wm_nvm_read_eerd;
   2325 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2326 		} else {
   2327 			sc->nvm.read = wm_nvm_read_spi;
   2328 			sc->sc_flags |= WM_F_LOCK_EECD;
   2329 		}
   2330 		sc->phy.acquire = wm_get_phy_82575;
   2331 		sc->phy.release = wm_put_phy_82575;
   2332 		sc->nvm.acquire = wm_get_nvm_80003;
   2333 		sc->nvm.release = wm_put_nvm_80003;
   2334 		break;
   2335 	case WM_T_ICH8:
   2336 	case WM_T_ICH9:
   2337 	case WM_T_ICH10:
   2338 	case WM_T_PCH:
   2339 	case WM_T_PCH2:
   2340 	case WM_T_PCH_LPT:
   2341 		sc->nvm.read = wm_nvm_read_ich8;
   2342 		/* FLASH */
   2343 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2344 		sc->sc_nvm_wordsize = 2048;
   2345 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2346 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2347 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2348 			aprint_error_dev(sc->sc_dev,
   2349 			    "can't map FLASH registers\n");
   2350 			goto out;
   2351 		}
   2352 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2353 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2354 		    ICH_FLASH_SECTOR_SIZE;
   2355 		sc->sc_ich8_flash_bank_size =
   2356 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2357 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2358 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2359 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
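		/*
		 * Illustrative arithmetic (assuming a 4KB
		 * ICH_FLASH_SECTOR_SIZE): if GFPREG reports base sector 1
		 * and limit sector 8, the region spans (8 + 1) - 1 = 8
		 * sectors, i.e. 32KB; split over two banks and counted in
		 * 16-bit words, that is 8192 words per bank.
		 */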
   2360 		sc->sc_flashreg_offset = 0;
   2361 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2362 		sc->phy.release = wm_put_swflag_ich8lan;
   2363 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2364 		sc->nvm.release = wm_put_nvm_ich8lan;
   2365 		break;
   2366 	case WM_T_PCH_SPT:
   2367 	case WM_T_PCH_CNP:
   2368 		sc->nvm.read = wm_nvm_read_spt;
   2369 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2370 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2371 		sc->sc_flasht = sc->sc_st;
   2372 		sc->sc_flashh = sc->sc_sh;
   2373 		sc->sc_ich8_flash_base = 0;
   2374 		sc->sc_nvm_wordsize =
   2375 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2376 		    * NVM_SIZE_MULTIPLIER;
    2377 		/* It is the size in bytes; we want it in words */
   2378 		sc->sc_nvm_wordsize /= 2;
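		/*
		 * For example, assuming NVM_SIZE_MULTIPLIER is 4096: a
		 * strap field of 7 gives (7 + 1) * 4096 = 32768 bytes,
		 * i.e. 16384 16-bit words.
		 */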
   2379 		/* Assume 2 banks */
   2380 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2381 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2382 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2383 		sc->phy.release = wm_put_swflag_ich8lan;
   2384 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2385 		sc->nvm.release = wm_put_nvm_ich8lan;
   2386 		break;
   2387 	case WM_T_I210:
   2388 	case WM_T_I211:
    2389 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2390 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2391 		if (wm_nvm_flash_presence_i210(sc)) {
   2392 			sc->nvm.read = wm_nvm_read_eerd;
   2393 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2394 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2395 			wm_nvm_set_addrbits_size_eecd(sc);
   2396 		} else {
   2397 			sc->nvm.read = wm_nvm_read_invm;
   2398 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2399 			sc->sc_nvm_wordsize = INVM_SIZE;
   2400 		}
   2401 		sc->phy.acquire = wm_get_phy_82575;
   2402 		sc->phy.release = wm_put_phy_82575;
   2403 		sc->nvm.acquire = wm_get_nvm_80003;
   2404 		sc->nvm.release = wm_put_nvm_80003;
   2405 		break;
   2406 	default:
   2407 		break;
   2408 	}
   2409 
   2410 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2411 	switch (sc->sc_type) {
   2412 	case WM_T_82571:
   2413 	case WM_T_82572:
   2414 		reg = CSR_READ(sc, WMREG_SWSM2);
   2415 		if ((reg & SWSM2_LOCK) == 0) {
   2416 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2417 			force_clear_smbi = true;
   2418 		} else
   2419 			force_clear_smbi = false;
   2420 		break;
   2421 	case WM_T_82573:
   2422 	case WM_T_82574:
   2423 	case WM_T_82583:
   2424 		force_clear_smbi = true;
   2425 		break;
   2426 	default:
   2427 		force_clear_smbi = false;
   2428 		break;
   2429 	}
   2430 	if (force_clear_smbi) {
   2431 		reg = CSR_READ(sc, WMREG_SWSM);
   2432 		if ((reg & SWSM_SMBI) != 0)
   2433 			aprint_error_dev(sc->sc_dev,
   2434 			    "Please update the Bootagent\n");
   2435 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2436 	}
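
	/*
	 * SWSM_SMBI is the software semaphore bit; if a buggy boot agent
	 * leaves it set, the first semaphore acquisition would never
	 * succeed, hence the forced clear (and the warning) above.
	 */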
   2437 
   2438 	/*
    2439 	 * Defer printing the EEPROM type until after verifying the checksum.
   2440 	 * This allows the EEPROM type to be printed correctly in the case
   2441 	 * that no EEPROM is attached.
   2442 	 */
   2443 	/*
   2444 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2445 	 * this for later, so we can fail future reads from the EEPROM.
   2446 	 */
   2447 	if (wm_nvm_validate_checksum(sc)) {
   2448 		/*
    2449 		 * Read it again because some PCI-e parts fail the
    2450 		 * first check due to the link being in a sleep state.
   2451 		 */
   2452 		if (wm_nvm_validate_checksum(sc))
   2453 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2454 	}
   2455 
   2456 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2457 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2458 	else {
   2459 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2460 		    sc->sc_nvm_wordsize);
   2461 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2462 			aprint_verbose("iNVM");
   2463 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2464 			aprint_verbose("FLASH(HW)");
   2465 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2466 			aprint_verbose("FLASH");
   2467 		else {
   2468 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2469 				eetype = "SPI";
   2470 			else
   2471 				eetype = "MicroWire";
   2472 			aprint_verbose("(%d address bits) %s EEPROM",
   2473 			    sc->sc_nvm_addrbits, eetype);
   2474 		}
   2475 	}
   2476 	wm_nvm_version(sc);
   2477 	aprint_verbose("\n");
   2478 
   2479 	/*
    2480 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
    2481 	 * result might be incorrect.
   2482 	 */
   2483 	wm_gmii_setup_phytype(sc, 0, 0);
   2484 
   2485 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2486 	switch (sc->sc_type) {
   2487 	case WM_T_ICH8:
   2488 	case WM_T_ICH9:
   2489 	case WM_T_ICH10:
   2490 	case WM_T_PCH:
   2491 	case WM_T_PCH2:
   2492 	case WM_T_PCH_LPT:
   2493 	case WM_T_PCH_SPT:
   2494 	case WM_T_PCH_CNP:
   2495 		apme_mask = WUC_APME;
   2496 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2497 		if ((eeprom_data & apme_mask) != 0)
   2498 			sc->sc_flags |= WM_F_WOL;
   2499 		break;
   2500 	default:
   2501 		break;
   2502 	}
   2503 
   2504 	/* Reset the chip to a known state. */
   2505 	wm_reset(sc);
   2506 
   2507 	/*
   2508 	 * Check for I21[01] PLL workaround.
   2509 	 *
   2510 	 * Three cases:
   2511 	 * a) Chip is I211.
   2512 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2513 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2514 	 */
   2515 	if (sc->sc_type == WM_T_I211)
   2516 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2517 	if (sc->sc_type == WM_T_I210) {
   2518 		if (!wm_nvm_flash_presence_i210(sc))
   2519 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2520 		else if ((sc->sc_nvm_ver_major < 3)
   2521 		    || ((sc->sc_nvm_ver_major == 3)
   2522 			&& (sc->sc_nvm_ver_minor < 25))) {
   2523 			aprint_verbose_dev(sc->sc_dev,
   2524 			    "ROM image version %d.%d is older than 3.25\n",
   2525 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2526 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2527 		}
   2528 	}
   2529 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2530 		wm_pll_workaround_i210(sc);
   2531 
   2532 	wm_get_wakeup(sc);
   2533 
   2534 	/* Non-AMT based hardware can now take control from firmware */
   2535 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2536 		wm_get_hw_control(sc);
   2537 
   2538 	/*
   2539 	 * Read the Ethernet address from the EEPROM, if not first found
   2540 	 * in device properties.
   2541 	 */
   2542 	ea = prop_dictionary_get(dict, "mac-address");
   2543 	if (ea != NULL) {
   2544 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2545 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2546 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2547 	} else {
   2548 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2549 			aprint_error_dev(sc->sc_dev,
   2550 			    "unable to read Ethernet address\n");
   2551 			goto out;
   2552 		}
   2553 	}
   2554 
   2555 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2556 	    ether_sprintf(enaddr));
   2557 
   2558 	/*
   2559 	 * Read the config info from the EEPROM, and set up various
   2560 	 * bits in the control registers based on their contents.
   2561 	 */
   2562 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2563 	if (pn != NULL) {
   2564 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2565 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2566 	} else {
   2567 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2568 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2569 			goto out;
   2570 		}
   2571 	}
   2572 
   2573 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2574 	if (pn != NULL) {
   2575 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2576 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2577 	} else {
   2578 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2579 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2580 			goto out;
   2581 		}
   2582 	}
   2583 
   2584 	/* check for WM_F_WOL */
   2585 	switch (sc->sc_type) {
   2586 	case WM_T_82542_2_0:
   2587 	case WM_T_82542_2_1:
   2588 	case WM_T_82543:
   2589 		/* dummy? */
   2590 		eeprom_data = 0;
   2591 		apme_mask = NVM_CFG3_APME;
   2592 		break;
   2593 	case WM_T_82544:
   2594 		apme_mask = NVM_CFG2_82544_APM_EN;
   2595 		eeprom_data = cfg2;
   2596 		break;
   2597 	case WM_T_82546:
   2598 	case WM_T_82546_3:
   2599 	case WM_T_82571:
   2600 	case WM_T_82572:
   2601 	case WM_T_82573:
   2602 	case WM_T_82574:
   2603 	case WM_T_82583:
   2604 	case WM_T_80003:
   2605 	case WM_T_82575:
   2606 	case WM_T_82576:
   2607 		apme_mask = NVM_CFG3_APME;
   2608 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2609 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2610 		break;
   2611 	case WM_T_82580:
   2612 	case WM_T_I350:
   2613 	case WM_T_I354:
   2614 	case WM_T_I210:
   2615 	case WM_T_I211:
   2616 		apme_mask = NVM_CFG3_APME;
   2617 		wm_nvm_read(sc,
   2618 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2619 		    1, &eeprom_data);
   2620 		break;
   2621 	case WM_T_ICH8:
   2622 	case WM_T_ICH9:
   2623 	case WM_T_ICH10:
   2624 	case WM_T_PCH:
   2625 	case WM_T_PCH2:
   2626 	case WM_T_PCH_LPT:
   2627 	case WM_T_PCH_SPT:
   2628 	case WM_T_PCH_CNP:
    2629 		/* Already checked before wm_reset() */
   2630 		apme_mask = eeprom_data = 0;
   2631 		break;
   2632 	default: /* XXX 82540 */
   2633 		apme_mask = NVM_CFG3_APME;
   2634 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2635 		break;
   2636 	}
   2637 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2638 	if ((eeprom_data & apme_mask) != 0)
   2639 		sc->sc_flags |= WM_F_WOL;
   2640 
   2641 	/*
   2642 	 * We have the eeprom settings, now apply the special cases
   2643 	 * where the eeprom may be wrong or the board won't support
    2644 	 * wake on LAN on a particular port.
   2645 	 */
   2646 	switch (sc->sc_pcidevid) {
   2647 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2648 		sc->sc_flags &= ~WM_F_WOL;
   2649 		break;
   2650 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2651 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2652 		/* Wake events only supported on port A for dual fiber
   2653 		 * regardless of eeprom setting */
   2654 		if (sc->sc_funcid == 1)
   2655 			sc->sc_flags &= ~WM_F_WOL;
   2656 		break;
   2657 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2658 		/* If quad port adapter, disable WoL on all but port A */
   2659 		if (sc->sc_funcid != 0)
   2660 			sc->sc_flags &= ~WM_F_WOL;
   2661 		break;
   2662 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2663 		/* Wake events only supported on port A for dual fiber
   2664 		 * regardless of eeprom setting */
   2665 		if (sc->sc_funcid == 1)
   2666 			sc->sc_flags &= ~WM_F_WOL;
   2667 		break;
   2668 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2669 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2670 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2671 		/* If quad port adapter, disable WoL on all but port A */
   2672 		if (sc->sc_funcid != 0)
   2673 			sc->sc_flags &= ~WM_F_WOL;
   2674 		break;
   2675 	}
   2676 
   2677 	if (sc->sc_type >= WM_T_82575) {
   2678 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2679 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2680 			    nvmword);
   2681 			if ((sc->sc_type == WM_T_82575) ||
   2682 			    (sc->sc_type == WM_T_82576)) {
   2683 				/* Check NVM for autonegotiation */
   2684 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2685 				    != 0)
   2686 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2687 			}
   2688 			if ((sc->sc_type == WM_T_82575) ||
   2689 			    (sc->sc_type == WM_T_I350)) {
   2690 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2691 					sc->sc_flags |= WM_F_MAS;
   2692 			}
   2693 		}
   2694 	}
   2695 
   2696 	/*
    2697 	 * XXX need special handling for some multiple-port cards
    2698 	 * to disable a particular port.
   2699 	 */
   2700 
   2701 	if (sc->sc_type >= WM_T_82544) {
   2702 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2703 		if (pn != NULL) {
   2704 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2705 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2706 		} else {
   2707 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2708 				aprint_error_dev(sc->sc_dev,
   2709 				    "unable to read SWDPIN\n");
   2710 				goto out;
   2711 			}
   2712 		}
   2713 	}
   2714 
   2715 	if (cfg1 & NVM_CFG1_ILOS)
   2716 		sc->sc_ctrl |= CTRL_ILOS;
   2717 
   2718 	/*
   2719 	 * XXX
    2720 	 * This code isn't correct because pins 2 and 3 are located
    2721 	 * at different positions on newer chips. Check all the datasheets.
    2722 	 *
    2723 	 * Until this is resolved, only do it for chips up to the 82580.
   2724 	 */
   2725 	if (sc->sc_type <= WM_T_82580) {
   2726 		if (sc->sc_type >= WM_T_82544) {
   2727 			sc->sc_ctrl |=
   2728 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2729 			    CTRL_SWDPIO_SHIFT;
   2730 			sc->sc_ctrl |=
   2731 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2732 			    CTRL_SWDPINS_SHIFT;
   2733 		} else {
   2734 			sc->sc_ctrl |=
   2735 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2736 			    CTRL_SWDPIO_SHIFT;
   2737 		}
   2738 	}
   2739 
   2740 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2741 		wm_nvm_read(sc,
   2742 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2743 		    1, &nvmword);
   2744 		if (nvmword & NVM_CFG3_ILOS)
   2745 			sc->sc_ctrl |= CTRL_ILOS;
   2746 	}
   2747 
   2748 #if 0
   2749 	if (sc->sc_type >= WM_T_82544) {
   2750 		if (cfg1 & NVM_CFG1_IPS0)
   2751 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2752 		if (cfg1 & NVM_CFG1_IPS1)
   2753 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2754 		sc->sc_ctrl_ext |=
   2755 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2756 		    CTRL_EXT_SWDPIO_SHIFT;
   2757 		sc->sc_ctrl_ext |=
   2758 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2759 		    CTRL_EXT_SWDPINS_SHIFT;
   2760 	} else {
   2761 		sc->sc_ctrl_ext |=
   2762 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2763 		    CTRL_EXT_SWDPIO_SHIFT;
   2764 	}
   2765 #endif
   2766 
   2767 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2768 #if 0
   2769 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2770 #endif
   2771 
   2772 	if (sc->sc_type == WM_T_PCH) {
   2773 		uint16_t val;
   2774 
   2775 		/* Save the NVM K1 bit setting */
   2776 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2777 
   2778 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2779 			sc->sc_nvm_k1_enabled = 1;
   2780 		else
   2781 			sc->sc_nvm_k1_enabled = 0;
   2782 	}
   2783 
   2784 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2785 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2786 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2787 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2788 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2789 	    || sc->sc_type == WM_T_82573
   2790 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2791 		/* Copper only */
    2792 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2793 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2794 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2795 	    || (sc->sc_type == WM_T_I211)) {
   2796 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2797 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2798 		switch (link_mode) {
   2799 		case CTRL_EXT_LINK_MODE_1000KX:
   2800 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2801 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2802 			break;
   2803 		case CTRL_EXT_LINK_MODE_SGMII:
   2804 			if (wm_sgmii_uses_mdio(sc)) {
   2805 				aprint_normal_dev(sc->sc_dev,
   2806 				    "SGMII(MDIO)\n");
   2807 				sc->sc_flags |= WM_F_SGMII;
   2808 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2809 				break;
   2810 			}
   2811 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2812 			/*FALLTHROUGH*/
   2813 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2814 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2815 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2816 				if (link_mode
   2817 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2818 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2819 					sc->sc_flags |= WM_F_SGMII;
   2820 					aprint_verbose_dev(sc->sc_dev,
   2821 					    "SGMII\n");
   2822 				} else {
   2823 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2824 					aprint_verbose_dev(sc->sc_dev,
   2825 					    "SERDES\n");
   2826 				}
   2827 				break;
   2828 			}
   2829 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2830 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2831 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2832 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2833 				sc->sc_flags |= WM_F_SGMII;
   2834 			}
   2835 			/* Do not change link mode for 100BaseFX */
   2836 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2837 				break;
   2838 
   2839 			/* Change current link mode setting */
   2840 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2841 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2842 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2843 			else
   2844 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2845 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2846 			break;
   2847 		case CTRL_EXT_LINK_MODE_GMII:
   2848 		default:
   2849 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2850 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2851 			break;
   2852 		}
   2853 
    2854 		/* Enable the I2C interface only when using SGMII. */
    2855 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2856 			reg |= CTRL_EXT_I2C_ENA;
    2857 		else
    2858 			reg &= ~CTRL_EXT_I2C_ENA;
   2859 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2860 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2861 			wm_gmii_setup_phytype(sc, 0, 0);
   2862 			wm_reset_mdicnfg_82580(sc);
   2863 		}
   2864 	} else if (sc->sc_type < WM_T_82543 ||
   2865 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2866 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2867 			aprint_error_dev(sc->sc_dev,
   2868 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2869 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2870 		}
   2871 	} else {
   2872 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2873 			aprint_error_dev(sc->sc_dev,
   2874 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2875 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2876 		}
   2877 	}
   2878 
   2879 	if (sc->sc_type >= WM_T_PCH2)
   2880 		sc->sc_flags |= WM_F_EEE;
   2881 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2882 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2883 		/* XXX: Need special handling for I354. (not yet) */
   2884 		if (sc->sc_type != WM_T_I354)
   2885 			sc->sc_flags |= WM_F_EEE;
   2886 	}
   2887 
   2888 	/* Set device properties (macflags) */
   2889 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2890 
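	/*
	 * Illustrative note: snprintb(3) renders sc_flags using the
	 * WM_FLAGS bit-description string, so the verbose attach output
	 * takes the form "0x<value><FLAG1,FLAG2,...>".
	 */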
   2891 	if (sc->sc_flags != 0) {
   2892 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2893 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2894 	}
   2895 
   2896 #ifdef WM_MPSAFE
   2897 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2898 #else
   2899 	sc->sc_core_lock = NULL;
   2900 #endif
   2901 
   2902 	/* Initialize the media structures accordingly. */
   2903 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2904 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2905 	else
   2906 		wm_tbi_mediainit(sc); /* All others */
   2907 
   2908 	ifp = &sc->sc_ethercom.ec_if;
   2909 	xname = device_xname(sc->sc_dev);
   2910 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2911 	ifp->if_softc = sc;
   2912 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2913 #ifdef WM_MPSAFE
   2914 	ifp->if_extflags = IFEF_MPSAFE;
   2915 #endif
   2916 	ifp->if_ioctl = wm_ioctl;
   2917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2918 		ifp->if_start = wm_nq_start;
   2919 		/*
    2920 		 * When there is only one CPU and the controller can use
    2921 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2922 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2923 		 * the other for link status changes.
   2924 		 * In this situation, wm_nq_transmit() is disadvantageous
   2925 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2926 		 */
   2927 		if (wm_is_using_multiqueue(sc))
   2928 			ifp->if_transmit = wm_nq_transmit;
   2929 	} else {
   2930 		ifp->if_start = wm_start;
   2931 		/*
    2932 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2933 		 */
   2934 		if (wm_is_using_multiqueue(sc))
   2935 			ifp->if_transmit = wm_transmit;
   2936 	}
    2937 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2938 	ifp->if_init = wm_init;
   2939 	ifp->if_stop = wm_stop;
   2940 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2941 	IFQ_SET_READY(&ifp->if_snd);
   2942 
   2943 	/* Check for jumbo frame */
   2944 	switch (sc->sc_type) {
   2945 	case WM_T_82573:
   2946 		/* XXX limited to 9234 if ASPM is disabled */
   2947 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2948 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2949 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2950 		break;
   2951 	case WM_T_82571:
   2952 	case WM_T_82572:
   2953 	case WM_T_82574:
   2954 	case WM_T_82583:
   2955 	case WM_T_82575:
   2956 	case WM_T_82576:
   2957 	case WM_T_82580:
   2958 	case WM_T_I350:
   2959 	case WM_T_I354:
   2960 	case WM_T_I210:
   2961 	case WM_T_I211:
   2962 	case WM_T_80003:
   2963 	case WM_T_ICH9:
   2964 	case WM_T_ICH10:
   2965 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2966 	case WM_T_PCH_LPT:
   2967 	case WM_T_PCH_SPT:
   2968 	case WM_T_PCH_CNP:
   2969 		/* XXX limited to 9234 */
   2970 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2971 		break;
   2972 	case WM_T_PCH:
   2973 		/* XXX limited to 4096 */
   2974 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2975 		break;
   2976 	case WM_T_82542_2_0:
   2977 	case WM_T_82542_2_1:
   2978 	case WM_T_ICH8:
   2979 		/* No support for jumbo frame */
   2980 		break;
   2981 	default:
   2982 		/* ETHER_MAX_LEN_JUMBO */
   2983 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2984 		break;
   2985 	}
   2986 
   2987 	/* If we're a i82543 or greater, we can support VLANs. */
   2988 	if (sc->sc_type >= WM_T_82543) {
   2989 		sc->sc_ethercom.ec_capabilities |=
   2990 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2991 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2992 	}
   2993 
   2994 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2995 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2996 
   2997 	/*
    2998 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2999 	 * on i82543 and later.
   3000 	 */
   3001 	if (sc->sc_type >= WM_T_82543) {
   3002 		ifp->if_capabilities |=
   3003 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3004 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3005 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3006 		    IFCAP_CSUM_TCPv6_Tx |
   3007 		    IFCAP_CSUM_UDPv6_Tx;
   3008 	}
   3009 
   3010 	/*
   3011 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3012 	 *
   3013 	 *	82541GI (8086:1076) ... no
   3014 	 *	82572EI (8086:10b9) ... yes
   3015 	 */
   3016 	if (sc->sc_type >= WM_T_82571) {
   3017 		ifp->if_capabilities |=
   3018 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3019 	}
   3020 
   3021 	/*
   3022 	 * If we're a i82544 or greater (except i82547), we can do
   3023 	 * TCP segmentation offload.
   3024 	 */
   3025 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3026 		ifp->if_capabilities |= IFCAP_TSOv4;
   3027 	}
   3028 
   3029 	if (sc->sc_type >= WM_T_82571) {
   3030 		ifp->if_capabilities |= IFCAP_TSOv6;
   3031 	}
   3032 
   3033 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3034 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3035 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3036 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3037 
   3038 	/* Attach the interface. */
   3039 	error = if_initialize(ifp);
   3040 	if (error != 0) {
   3041 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3042 		    error);
   3043 		return; /* Error */
   3044 	}
   3045 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3046 	ether_ifattach(ifp, enaddr);
   3047 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3048 	if_register(ifp);
   3049 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3050 	    RND_FLAG_DEFAULT);
   3051 
   3052 #ifdef WM_EVENT_COUNTERS
   3053 	/* Attach event counters. */
   3054 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3055 	    NULL, xname, "linkintr");
   3056 
   3057 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3058 	    NULL, xname, "tx_xoff");
   3059 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3060 	    NULL, xname, "tx_xon");
   3061 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3062 	    NULL, xname, "rx_xoff");
   3063 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3064 	    NULL, xname, "rx_xon");
   3065 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3066 	    NULL, xname, "rx_macctl");
   3067 #endif /* WM_EVENT_COUNTERS */
   3068 
   3069 	sc->sc_txrx_use_workqueue = false;
   3070 
   3071 	wm_init_sysctls(sc);
   3072 
   3073 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3074 		pmf_class_network_register(self, ifp);
   3075 	else
   3076 		aprint_error_dev(self, "couldn't establish power handler\n");
   3077 
   3078 	sc->sc_flags |= WM_F_ATTACHED;
   3079 out:
   3080 	return;
   3081 }
   3082 
   3083 /* The detach function (ca_detach) */
   3084 static int
   3085 wm_detach(device_t self, int flags __unused)
   3086 {
   3087 	struct wm_softc *sc = device_private(self);
   3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3089 	int i;
   3090 
   3091 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3092 		return 0;
   3093 
    3094 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
   3095 	wm_stop(ifp, 1);
   3096 
   3097 	pmf_device_deregister(self);
   3098 
   3099 	sysctl_teardown(&sc->sc_sysctllog);
   3100 
   3101 #ifdef WM_EVENT_COUNTERS
   3102 	evcnt_detach(&sc->sc_ev_linkintr);
   3103 
   3104 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3105 	evcnt_detach(&sc->sc_ev_tx_xon);
   3106 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3107 	evcnt_detach(&sc->sc_ev_rx_xon);
   3108 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3109 #endif /* WM_EVENT_COUNTERS */
   3110 
   3111 	rnd_detach_source(&sc->rnd_source);
   3112 
   3113 	/* Tell the firmware about the release */
   3114 	WM_CORE_LOCK(sc);
   3115 	wm_release_manageability(sc);
   3116 	wm_release_hw_control(sc);
   3117 	wm_enable_wakeup(sc);
   3118 	WM_CORE_UNLOCK(sc);
   3119 
   3120 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3121 
   3122 	ether_ifdetach(ifp);
   3123 	if_detach(ifp);
   3124 	if_percpuq_destroy(sc->sc_ipq);
   3125 
   3126 	/* Delete all remaining media. */
   3127 	ifmedia_fini(&sc->sc_mii.mii_media);
   3128 
   3129 	/* Unload RX dmamaps and free mbufs */
   3130 	for (i = 0; i < sc->sc_nqueues; i++) {
   3131 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3132 		mutex_enter(rxq->rxq_lock);
   3133 		wm_rxdrain(rxq);
   3134 		mutex_exit(rxq->rxq_lock);
   3135 	}
   3136 	/* Must unlock here */
   3137 
   3138 	/* Disestablish the interrupt handler */
   3139 	for (i = 0; i < sc->sc_nintrs; i++) {
   3140 		if (sc->sc_ihs[i] != NULL) {
   3141 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3142 			sc->sc_ihs[i] = NULL;
   3143 		}
   3144 	}
   3145 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3146 
    3147 	/* wm_stop() ensures the workqueue is stopped. */
   3148 	workqueue_destroy(sc->sc_queue_wq);
   3149 
   3150 	for (i = 0; i < sc->sc_nqueues; i++)
   3151 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3152 
   3153 	wm_free_txrx_queues(sc);
   3154 
   3155 	/* Unmap the registers */
   3156 	if (sc->sc_ss) {
   3157 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3158 		sc->sc_ss = 0;
   3159 	}
   3160 	if (sc->sc_ios) {
   3161 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3162 		sc->sc_ios = 0;
   3163 	}
   3164 	if (sc->sc_flashs) {
   3165 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3166 		sc->sc_flashs = 0;
   3167 	}
   3168 
   3169 	if (sc->sc_core_lock)
   3170 		mutex_obj_free(sc->sc_core_lock);
   3171 	if (sc->sc_ich_phymtx)
   3172 		mutex_obj_free(sc->sc_ich_phymtx);
   3173 	if (sc->sc_ich_nvmmtx)
   3174 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3175 
   3176 	return 0;
   3177 }
   3178 
   3179 static bool
   3180 wm_suspend(device_t self, const pmf_qual_t *qual)
   3181 {
   3182 	struct wm_softc *sc = device_private(self);
   3183 
   3184 	wm_release_manageability(sc);
   3185 	wm_release_hw_control(sc);
   3186 	wm_enable_wakeup(sc);
   3187 
   3188 	return true;
   3189 }
   3190 
   3191 static bool
   3192 wm_resume(device_t self, const pmf_qual_t *qual)
   3193 {
   3194 	struct wm_softc *sc = device_private(self);
   3195 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3196 	pcireg_t reg;
   3197 	char buf[256];
   3198 
   3199 	reg = CSR_READ(sc, WMREG_WUS);
   3200 	if (reg != 0) {
   3201 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3202 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3203 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3204 	}
   3205 
   3206 	if (sc->sc_type >= WM_T_PCH2)
   3207 		wm_resume_workarounds_pchlan(sc);
   3208 	if ((ifp->if_flags & IFF_UP) == 0) {
   3209 		wm_reset(sc);
   3210 		/* Non-AMT based hardware can now take control from firmware */
   3211 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3212 			wm_get_hw_control(sc);
   3213 		wm_init_manageability(sc);
   3214 	} else {
   3215 		/*
   3216 		 * We called pmf_class_network_register(), so if_init() is
   3217 		 * automatically called when IFF_UP. wm_reset(),
   3218 		 * wm_get_hw_control() and wm_init_manageability() are called
   3219 		 * via wm_init().
   3220 		 */
   3221 	}
   3222 
   3223 	return true;
   3224 }
   3225 
   3226 /*
   3227  * wm_watchdog:		[ifnet interface function]
   3228  *
   3229  *	Watchdog timer handler.
   3230  */
   3231 static void
   3232 wm_watchdog(struct ifnet *ifp)
   3233 {
   3234 	int qid;
   3235 	struct wm_softc *sc = ifp->if_softc;
    3236 	uint16_t hang_queue = 0; /* The max number of queues is 16 (82576). */
   3237 
   3238 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3239 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3240 
   3241 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3242 	}
   3243 
    3244 	/* If any of the queues hung up, reset the interface. */
   3245 	if (hang_queue != 0) {
   3246 		(void)wm_init(ifp);
   3247 
    3248 		/*
    3249 		 * Some upper-layer processing still calls ifp->if_start()
    3250 		 * directly, e.g. ALTQ or a single-CPU system.
    3251 		 */
   3252 		/* Try to get more packets going. */
   3253 		ifp->if_start(ifp);
   3254 	}
   3255 }
   3256 
   3257 
   3258 static void
   3259 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3260 {
   3261 
   3262 	mutex_enter(txq->txq_lock);
   3263 	if (txq->txq_sending &&
   3264 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3265 		wm_watchdog_txq_locked(ifp, txq, hang);
   3266 
   3267 	mutex_exit(txq->txq_lock);
   3268 }
   3269 
   3270 static void
   3271 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3272     uint16_t *hang)
   3273 {
   3274 	struct wm_softc *sc = ifp->if_softc;
   3275 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3276 
   3277 	KASSERT(mutex_owned(txq->txq_lock));
   3278 
   3279 	/*
   3280 	 * Since we're using delayed interrupts, sweep up
   3281 	 * before we report an error.
   3282 	 */
   3283 	wm_txeof(txq, UINT_MAX);
   3284 
   3285 	if (txq->txq_sending)
   3286 		*hang |= __BIT(wmq->wmq_id);
   3287 
   3288 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3289 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3290 		    device_xname(sc->sc_dev));
   3291 	} else {
   3292 #ifdef WM_DEBUG
   3293 		int i, j;
   3294 		struct wm_txsoft *txs;
   3295 #endif
   3296 		log(LOG_ERR,
   3297 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3298 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3299 		    txq->txq_next);
   3300 		if_statinc(ifp, if_oerrors);
   3301 #ifdef WM_DEBUG
   3302 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3303 		    i = WM_NEXTTXS(txq, i)) {
   3304 			txs = &txq->txq_soft[i];
   3305 			printf("txs %d tx %d -> %d\n",
   3306 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3307 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3308 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3309 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3310 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3311 					printf("\t %#08x%08x\n",
   3312 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3313 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3314 				} else {
   3315 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3316 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3317 					    txq->txq_descs[j].wtx_addr.wa_low);
   3318 					printf("\t %#04x%02x%02x%08x\n",
   3319 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3320 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3321 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3322 					    txq->txq_descs[j].wtx_cmdlen);
   3323 				}
   3324 				if (j == txs->txs_lastdesc)
   3325 					break;
   3326 			}
   3327 		}
   3328 #endif
   3329 	}
   3330 }
   3331 
   3332 /*
   3333  * wm_tick:
   3334  *
   3335  *	One second timer, used to check link status, sweep up
   3336  *	completed transmit jobs, etc.
   3337  */
   3338 static void
   3339 wm_tick(void *arg)
   3340 {
   3341 	struct wm_softc *sc = arg;
   3342 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3343 #ifndef WM_MPSAFE
   3344 	int s = splnet();
   3345 #endif
   3346 
   3347 	WM_CORE_LOCK(sc);
   3348 
   3349 	if (sc->sc_core_stopping) {
   3350 		WM_CORE_UNLOCK(sc);
   3351 #ifndef WM_MPSAFE
   3352 		splx(s);
   3353 #endif
   3354 		return;
   3355 	}
   3356 
   3357 	if (sc->sc_type >= WM_T_82542_2_1) {
   3358 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3359 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3360 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3361 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3362 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3363 	}
   3364 
   3365 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3366 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3367 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3368 	    + CSR_READ(sc, WMREG_CRCERRS)
   3369 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3370 	    + CSR_READ(sc, WMREG_SYMERRC)
   3371 	    + CSR_READ(sc, WMREG_RXERRC)
   3372 	    + CSR_READ(sc, WMREG_SEC)
   3373 	    + CSR_READ(sc, WMREG_CEXTERR)
   3374 	    + CSR_READ(sc, WMREG_RLEC));
    3375 	/*
    3376 	 * WMREG_RNBC is incremented when no receive buffers are available
    3377 	 * in host memory. It does not count dropped packets, because the
    3378 	 * Ethernet controller can still receive packets in that case as
    3379 	 * long as there is space in the PHY's FIFO.
    3380 	 *
    3381 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3382 	 * of your own instead of if_iqdrops.
    3383 	 */
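	/*
	 * A minimal sketch (disabled, not compiled) of counting RNBC with
	 * a dedicated event counter, following the WM_EVENT_COUNTERS
	 * pattern used elsewhere in this file; "sc_ev_rnbc" is a
	 * hypothetical softc member.
	 */
#if 0
	/* Once, at attach time: */
	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rnbc");
	/* Then per tick: */
	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
#endif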
   3384 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3385 	IF_STAT_PUTREF(ifp);
   3386 
   3387 	if (sc->sc_flags & WM_F_HAS_MII)
   3388 		mii_tick(&sc->sc_mii);
   3389 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3390 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3391 		wm_serdes_tick(sc);
   3392 	else
   3393 		wm_tbi_tick(sc);
   3394 
   3395 	WM_CORE_UNLOCK(sc);
   3396 
   3397 	wm_watchdog(ifp);
   3398 
   3399 	callout_schedule(&sc->sc_tick_ch, hz);
   3400 }
   3401 
   3402 static int
   3403 wm_ifflags_cb(struct ethercom *ec)
   3404 {
   3405 	struct ifnet *ifp = &ec->ec_if;
   3406 	struct wm_softc *sc = ifp->if_softc;
   3407 	u_short iffchange;
   3408 	int ecchange;
   3409 	bool needreset = false;
   3410 	int rc = 0;
   3411 
   3412 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3413 		device_xname(sc->sc_dev), __func__));
   3414 
   3415 	WM_CORE_LOCK(sc);
   3416 
   3417 	/*
   3418 	 * Check for if_flags.
   3419 	 * Main usage is to prevent linkdown when opening bpf.
   3420 	 */
   3421 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3422 	sc->sc_if_flags = ifp->if_flags;
   3423 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3424 		needreset = true;
   3425 		goto ec;
   3426 	}
   3427 
   3428 	/* iff related updates */
   3429 	if ((iffchange & IFF_PROMISC) != 0)
   3430 		wm_set_filter(sc);
   3431 
   3432 	wm_set_vlan(sc);
   3433 
   3434 ec:
   3435 	/* Check for ec_capenable. */
   3436 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3437 	sc->sc_ec_capenable = ec->ec_capenable;
   3438 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3439 		needreset = true;
   3440 		goto out;
   3441 	}
   3442 
   3443 	/* ec related updates */
   3444 	wm_set_eee(sc);
   3445 
   3446 out:
   3447 	if (needreset)
   3448 		rc = ENETRESET;
   3449 	WM_CORE_UNLOCK(sc);
   3450 
   3451 	return rc;
   3452 }
   3453 
   3454 /*
   3455  * wm_ioctl:		[ifnet interface function]
   3456  *
   3457  *	Handle control requests from the operator.
   3458  */
   3459 static int
   3460 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3461 {
   3462 	struct wm_softc *sc = ifp->if_softc;
   3463 	struct ifreq *ifr = (struct ifreq *)data;
   3464 	struct ifaddr *ifa = (struct ifaddr *)data;
   3465 	struct sockaddr_dl *sdl;
   3466 	int s, error;
   3467 
   3468 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3469 		device_xname(sc->sc_dev), __func__));
   3470 
   3471 #ifndef WM_MPSAFE
   3472 	s = splnet();
   3473 #endif
   3474 	switch (cmd) {
   3475 	case SIOCSIFMEDIA:
   3476 		WM_CORE_LOCK(sc);
   3477 		/* Flow control requires full-duplex mode. */
   3478 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3479 		    (ifr->ifr_media & IFM_FDX) == 0)
   3480 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3481 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3482 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3483 				/* We can do both TXPAUSE and RXPAUSE. */
   3484 				ifr->ifr_media |=
   3485 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3486 			}
   3487 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3488 		}
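		/*
		 * Example (illustrative): a request for 1000baseT-FDX with
		 * IFM_FLOW set is expanded to IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE
		 * above before being handed to ifmedia_ioctl() below.
		 */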
   3489 		WM_CORE_UNLOCK(sc);
   3490 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3491 		break;
   3492 	case SIOCINITIFADDR:
   3493 		WM_CORE_LOCK(sc);
   3494 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3495 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3496 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3497 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3498 			/* Unicast address is the first multicast entry */
   3499 			wm_set_filter(sc);
   3500 			error = 0;
   3501 			WM_CORE_UNLOCK(sc);
   3502 			break;
   3503 		}
   3504 		WM_CORE_UNLOCK(sc);
   3505 		/*FALLTHROUGH*/
   3506 	default:
   3507 #ifdef WM_MPSAFE
   3508 		s = splnet();
   3509 #endif
   3510 		/* It may call wm_start, so unlock here */
   3511 		error = ether_ioctl(ifp, cmd, data);
   3512 #ifdef WM_MPSAFE
   3513 		splx(s);
   3514 #endif
   3515 		if (error != ENETRESET)
   3516 			break;
   3517 
   3518 		error = 0;
   3519 
   3520 		if (cmd == SIOCSIFCAP)
   3521 			error = (*ifp->if_init)(ifp);
   3522 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3523 			;
   3524 		else if (ifp->if_flags & IFF_RUNNING) {
   3525 			/*
   3526 			 * Multicast list has changed; set the hardware filter
   3527 			 * accordingly.
   3528 			 */
   3529 			WM_CORE_LOCK(sc);
   3530 			wm_set_filter(sc);
   3531 			WM_CORE_UNLOCK(sc);
   3532 		}
   3533 		break;
   3534 	}
   3535 
   3536 #ifndef WM_MPSAFE
   3537 	splx(s);
   3538 #endif
   3539 	return error;
   3540 }
   3541 
   3542 /* MAC address related */
   3543 
   3544 /*
    3545  * Get the offset of the MAC address and return it.
    3546  * If an error occurs, return offset 0.
   3547  */
   3548 static uint16_t
   3549 wm_check_alt_mac_addr(struct wm_softc *sc)
   3550 {
   3551 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3552 	uint16_t offset = NVM_OFF_MACADDR;
   3553 
   3554 	/* Try to read alternative MAC address pointer */
   3555 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3556 		return 0;
   3557 
    3558 	/* Check whether the pointer is valid. */
   3559 	if ((offset == 0x0000) || (offset == 0xffff))
   3560 		return 0;
   3561 
   3562 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3563 	/*
    3564 	 * Check whether the alternative MAC address is valid. Some cards
    3565 	 * have a non-0xffff pointer but don't actually use an alternative
    3566 	 * MAC address.
    3567 	 *
    3568 	 * Check the multicast/broadcast bit: bit 0 of the first byte must
    3569 	 * be clear for a valid unicast station address.
    3570 	 */
   3570 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3571 		if (((myea[0] & 0xff) & 0x01) == 0)
   3572 			return offset; /* Found */
   3573 
   3574 	/* Not found */
   3575 	return 0;
   3576 }
   3577 
   3578 static int
   3579 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3580 {
   3581 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3582 	uint16_t offset = NVM_OFF_MACADDR;
   3583 	int do_invert = 0;
   3584 
   3585 	switch (sc->sc_type) {
   3586 	case WM_T_82580:
   3587 	case WM_T_I350:
   3588 	case WM_T_I354:
   3589 		/* EEPROM Top Level Partitioning */
   3590 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3591 		break;
   3592 	case WM_T_82571:
   3593 	case WM_T_82575:
   3594 	case WM_T_82576:
   3595 	case WM_T_80003:
   3596 	case WM_T_I210:
   3597 	case WM_T_I211:
   3598 		offset = wm_check_alt_mac_addr(sc);
   3599 		if (offset == 0)
   3600 			if ((sc->sc_funcid & 0x01) == 1)
   3601 				do_invert = 1;
   3602 		break;
   3603 	default:
   3604 		if ((sc->sc_funcid & 0x01) == 1)
   3605 			do_invert = 1;
   3606 		break;
   3607 	}
   3608 
   3609 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3610 		goto bad;
   3611 
   3612 	enaddr[0] = myea[0] & 0xff;
   3613 	enaddr[1] = myea[0] >> 8;
   3614 	enaddr[2] = myea[1] & 0xff;
   3615 	enaddr[3] = myea[1] >> 8;
   3616 	enaddr[4] = myea[2] & 0xff;
   3617 	enaddr[5] = myea[2] >> 8;
   3618 
   3619 	/*
   3620 	 * Toggle the LSB of the MAC address on the second port
   3621 	 * of some dual port cards.
   3622 	 */
   3623 	if (do_invert != 0)
   3624 		enaddr[5] ^= 1;
   3625 
   3626 	return 0;
   3627 
   3628  bad:
   3629 	return -1;
   3630 }
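
/*
 * Worked example (illustrative only): NVM words are little-endian, so
 * myea[] = { 0x2211, 0x4433, 0x6655 } unpacks to the MAC address
 * 11:22:33:44:55:66 above; with do_invert set, the last byte becomes
 * 0x67.
 */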
   3631 
   3632 /*
   3633  * wm_set_ral:
   3634  *
    3635  *	Set an entry in the receive address list.
   3636  */
   3637 static void
   3638 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3639 {
   3640 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3641 	uint32_t wlock_mac;
   3642 	int rv;
   3643 
   3644 	if (enaddr != NULL) {
   3645 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3646 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3647 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3648 		ral_hi |= RAL_AV;
   3649 	} else {
   3650 		ral_lo = 0;
   3651 		ral_hi = 0;
   3652 	}
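	/*
	 * Example (illustrative): enaddr 11:22:33:44:55:66 packs to
	 * ral_lo = 0x44332211 and ral_hi = RAL_AV | 0x00006655.
	 */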
   3653 
   3654 	switch (sc->sc_type) {
   3655 	case WM_T_82542_2_0:
   3656 	case WM_T_82542_2_1:
   3657 	case WM_T_82543:
   3658 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3659 		CSR_WRITE_FLUSH(sc);
   3660 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3661 		CSR_WRITE_FLUSH(sc);
   3662 		break;
   3663 	case WM_T_PCH2:
   3664 	case WM_T_PCH_LPT:
   3665 	case WM_T_PCH_SPT:
   3666 	case WM_T_PCH_CNP:
   3667 		if (idx == 0) {
   3668 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3669 			CSR_WRITE_FLUSH(sc);
   3670 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3671 			CSR_WRITE_FLUSH(sc);
   3672 			return;
   3673 		}
   3674 		if (sc->sc_type != WM_T_PCH2) {
   3675 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3676 			    FWSM_WLOCK_MAC);
   3677 			addrl = WMREG_SHRAL(idx - 1);
   3678 			addrh = WMREG_SHRAH(idx - 1);
   3679 		} else {
   3680 			wlock_mac = 0;
   3681 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3682 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3683 		}
   3684 
   3685 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3686 			rv = wm_get_swflag_ich8lan(sc);
   3687 			if (rv != 0)
   3688 				return;
   3689 			CSR_WRITE(sc, addrl, ral_lo);
   3690 			CSR_WRITE_FLUSH(sc);
   3691 			CSR_WRITE(sc, addrh, ral_hi);
   3692 			CSR_WRITE_FLUSH(sc);
   3693 			wm_put_swflag_ich8lan(sc);
   3694 		}
   3695 
   3696 		break;
   3697 	default:
   3698 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3699 		CSR_WRITE_FLUSH(sc);
   3700 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3701 		CSR_WRITE_FLUSH(sc);
   3702 		break;
   3703 	}
   3704 }
   3705 
   3706 /*
   3707  * wm_mchash:
   3708  *
   3709  *	Compute the hash of the multicast address for the 4096-bit
   3710  *	multicast filter.
   3711  */
   3712 static uint32_t
   3713 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3714 {
   3715 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3716 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3717 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3718 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3719 	uint32_t hash;
   3720 
   3721 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3722 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3723 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3724 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3725 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3726 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3727 		return (hash & 0x3ff);
   3728 	}
   3729 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3730 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3731 
   3732 	return (hash & 0xfff);
   3733 }
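
/*
 * A minimal sketch (disabled, not compiled) of how a wm_mchash() value
 * selects one bit in the multicast table; the register index and bit
 * position mirror the computation in wm_set_filter() below for the
 * non-ICH/PCH case.
 */
#if 0
	uint32_t hash = wm_mchash(sc, enaddr);	/* 12 bits (10 on ICH/PCH) */
	uint32_t mreg = (hash >> 5) & 0x7f;	/* one of 128 32-bit MTA words */
	uint32_t mbit = hash & 0x1f;		/* bit within that word */

	CSR_WRITE(sc, mta_reg + (mreg << 2),
	    CSR_READ(sc, mta_reg + (mreg << 2)) | (1U << mbit));
#endif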
   3734 
    3735 /*
    3736  * wm_rar_count:
    3737  *	Return the number of receive address list (RAL) entries.
    3738  */
   3739 static int
   3740 wm_rar_count(struct wm_softc *sc)
   3741 {
   3742 	int size;
   3743 
   3744 	switch (sc->sc_type) {
   3745 	case WM_T_ICH8:
    3746 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3747 		break;
   3748 	case WM_T_ICH9:
   3749 	case WM_T_ICH10:
   3750 	case WM_T_PCH:
   3751 		size = WM_RAL_TABSIZE_ICH8;
   3752 		break;
   3753 	case WM_T_PCH2:
   3754 		size = WM_RAL_TABSIZE_PCH2;
   3755 		break;
   3756 	case WM_T_PCH_LPT:
   3757 	case WM_T_PCH_SPT:
   3758 	case WM_T_PCH_CNP:
   3759 		size = WM_RAL_TABSIZE_PCH_LPT;
   3760 		break;
   3761 	case WM_T_82575:
   3762 	case WM_T_I210:
   3763 	case WM_T_I211:
   3764 		size = WM_RAL_TABSIZE_82575;
   3765 		break;
   3766 	case WM_T_82576:
   3767 	case WM_T_82580:
   3768 		size = WM_RAL_TABSIZE_82576;
   3769 		break;
   3770 	case WM_T_I350:
   3771 	case WM_T_I354:
   3772 		size = WM_RAL_TABSIZE_I350;
   3773 		break;
   3774 	default:
   3775 		size = WM_RAL_TABSIZE;
   3776 	}
   3777 
   3778 	return size;
   3779 }
   3780 
   3781 /*
   3782  * wm_set_filter:
   3783  *
   3784  *	Set up the receive filter.
   3785  */
   3786 static void
   3787 wm_set_filter(struct wm_softc *sc)
   3788 {
   3789 	struct ethercom *ec = &sc->sc_ethercom;
   3790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3791 	struct ether_multi *enm;
   3792 	struct ether_multistep step;
   3793 	bus_addr_t mta_reg;
   3794 	uint32_t hash, reg, bit;
   3795 	int i, size, ralmax;
   3796 
   3797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3798 		device_xname(sc->sc_dev), __func__));
   3799 
   3800 	if (sc->sc_type >= WM_T_82544)
   3801 		mta_reg = WMREG_CORDOVA_MTA;
   3802 	else
   3803 		mta_reg = WMREG_MTA;
   3804 
   3805 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3806 
   3807 	if (ifp->if_flags & IFF_BROADCAST)
   3808 		sc->sc_rctl |= RCTL_BAM;
   3809 	if (ifp->if_flags & IFF_PROMISC) {
   3810 		sc->sc_rctl |= RCTL_UPE;
   3811 		ETHER_LOCK(ec);
   3812 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3813 		ETHER_UNLOCK(ec);
   3814 		goto allmulti;
   3815 	}
   3816 
   3817 	/*
   3818 	 * Set the station address in the first RAL slot, and
   3819 	 * clear the remaining slots.
   3820 	 */
   3821 	size = wm_rar_count(sc);
   3822 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3823 
   3824 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3825 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3826 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3827 		switch (i) {
   3828 		case 0:
   3829 			/* We can use all entries */
   3830 			ralmax = size;
   3831 			break;
   3832 		case 1:
   3833 			/* Only RAR[0] */
   3834 			ralmax = 1;
   3835 			break;
   3836 		default:
   3837 			/* Available SHRA + RAR[0] */
   3838 			ralmax = i + 1;
   3839 		}
   3840 	} else
   3841 		ralmax = size;
   3842 	for (i = 1; i < size; i++) {
   3843 		if (i < ralmax)
   3844 			wm_set_ral(sc, NULL, i);
   3845 	}
   3846 
   3847 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3848 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3849 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3850 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3851 		size = WM_ICH8_MC_TABSIZE;
   3852 	else
   3853 		size = WM_MC_TABSIZE;
   3854 	/* Clear out the multicast table. */
   3855 	for (i = 0; i < size; i++) {
   3856 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3857 		CSR_WRITE_FLUSH(sc);
   3858 	}
   3859 
   3860 	ETHER_LOCK(ec);
   3861 	ETHER_FIRST_MULTI(step, ec, enm);
   3862 	while (enm != NULL) {
   3863 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3864 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3865 			ETHER_UNLOCK(ec);
   3866 			/*
   3867 			 * We must listen to a range of multicast addresses.
   3868 			 * For now, just accept all multicasts, rather than
   3869 			 * trying to set only those filter bits needed to match
   3870 			 * the range.  (At this time, the only use of address
   3871 			 * ranges is for IP multicast routing, for which the
   3872 			 * range is big enough to require all bits set.)
   3873 			 */
   3874 			goto allmulti;
   3875 		}
   3876 
   3877 		hash = wm_mchash(sc, enm->enm_addrlo);
   3878 
   3879 		reg = (hash >> 5);
   3880 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3881 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3882 		    || (sc->sc_type == WM_T_PCH2)
   3883 		    || (sc->sc_type == WM_T_PCH_LPT)
   3884 		    || (sc->sc_type == WM_T_PCH_SPT)
   3885 		    || (sc->sc_type == WM_T_PCH_CNP))
   3886 			reg &= 0x1f;
   3887 		else
   3888 			reg &= 0x7f;
   3889 		bit = hash & 0x1f;
   3890 
   3891 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3892 		hash |= 1U << bit;
   3893 
   3894 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3895 			/*
   3896 			 * 82544 Errata 9: Certain register cannot be written
   3897 			 * with particular alignments in PCI-X bus operation
   3898 			 * (FCAH, MTA and VFTA).
   3899 			 */
   3900 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3901 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3902 			CSR_WRITE_FLUSH(sc);
   3903 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3904 			CSR_WRITE_FLUSH(sc);
   3905 		} else {
   3906 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3907 			CSR_WRITE_FLUSH(sc);
   3908 		}
   3909 
   3910 		ETHER_NEXT_MULTI(step, enm);
   3911 	}
   3912 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3913 	ETHER_UNLOCK(ec);
   3914 
   3915 	goto setit;
   3916 
   3917  allmulti:
   3918 	sc->sc_rctl |= RCTL_MPE;
   3919 
   3920  setit:
   3921 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3922 }
   3923 
   3924 /* Reset and init related */
   3925 
   3926 static void
   3927 wm_set_vlan(struct wm_softc *sc)
   3928 {
   3929 
   3930 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3931 		device_xname(sc->sc_dev), __func__));
   3932 
   3933 	/* Deal with VLAN enables. */
   3934 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3935 		sc->sc_ctrl |= CTRL_VME;
   3936 	else
   3937 		sc->sc_ctrl &= ~CTRL_VME;
   3938 
   3939 	/* Write the control registers. */
   3940 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3941 }
   3942 
   3943 static void
   3944 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3945 {
   3946 	uint32_t gcr;
   3947 	pcireg_t ctrl2;
   3948 
   3949 	gcr = CSR_READ(sc, WMREG_GCR);
   3950 
   3951 	/* Only take action if timeout value is defaulted to 0 */
   3952 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3953 		goto out;
   3954 
   3955 	if ((gcr & GCR_CAP_VER2) == 0) {
   3956 		gcr |= GCR_CMPL_TMOUT_10MS;
   3957 		goto out;
   3958 	}
   3959 
   3960 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3961 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3962 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3963 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3964 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3965 
   3966 out:
   3967 	/* Disable completion timeout resend */
   3968 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3969 
   3970 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3971 }
   3972 
   3973 void
   3974 wm_get_auto_rd_done(struct wm_softc *sc)
   3975 {
   3976 	int i;
   3977 
    3978 	/* Wait for the EEPROM to reload */
   3979 	switch (sc->sc_type) {
   3980 	case WM_T_82571:
   3981 	case WM_T_82572:
   3982 	case WM_T_82573:
   3983 	case WM_T_82574:
   3984 	case WM_T_82583:
   3985 	case WM_T_82575:
   3986 	case WM_T_82576:
   3987 	case WM_T_82580:
   3988 	case WM_T_I350:
   3989 	case WM_T_I354:
   3990 	case WM_T_I210:
   3991 	case WM_T_I211:
   3992 	case WM_T_80003:
   3993 	case WM_T_ICH8:
   3994 	case WM_T_ICH9:
   3995 		for (i = 0; i < 10; i++) {
   3996 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3997 				break;
   3998 			delay(1000);
   3999 		}
   4000 		if (i == 10) {
   4001 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4002 			    "complete\n", device_xname(sc->sc_dev));
   4003 		}
   4004 		break;
   4005 	default:
   4006 		break;
   4007 	}
   4008 }
   4009 
   4010 void
   4011 wm_lan_init_done(struct wm_softc *sc)
   4012 {
   4013 	uint32_t reg = 0;
   4014 	int i;
   4015 
   4016 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4017 		device_xname(sc->sc_dev), __func__));
   4018 
   4019 	/* Wait for eeprom to reload */
   4020 	switch (sc->sc_type) {
   4021 	case WM_T_ICH10:
   4022 	case WM_T_PCH:
   4023 	case WM_T_PCH2:
   4024 	case WM_T_PCH_LPT:
   4025 	case WM_T_PCH_SPT:
   4026 	case WM_T_PCH_CNP:
   4027 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4028 			reg = CSR_READ(sc, WMREG_STATUS);
   4029 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4030 				break;
   4031 			delay(100);
   4032 		}
   4033 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4034 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4035 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4036 		}
   4037 		break;
   4038 	default:
   4039 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4040 		    __func__);
   4041 		break;
   4042 	}
   4043 
   4044 	reg &= ~STATUS_LAN_INIT_DONE;
   4045 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4046 }
   4047 
   4048 void
   4049 wm_get_cfg_done(struct wm_softc *sc)
   4050 {
   4051 	int mask;
   4052 	uint32_t reg;
   4053 	int i;
   4054 
   4055 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4056 		device_xname(sc->sc_dev), __func__));
   4057 
   4058 	/* Wait for eeprom to reload */
   4059 	switch (sc->sc_type) {
   4060 	case WM_T_82542_2_0:
   4061 	case WM_T_82542_2_1:
   4062 		/* null */
   4063 		break;
   4064 	case WM_T_82543:
   4065 	case WM_T_82544:
   4066 	case WM_T_82540:
   4067 	case WM_T_82545:
   4068 	case WM_T_82545_3:
   4069 	case WM_T_82546:
   4070 	case WM_T_82546_3:
   4071 	case WM_T_82541:
   4072 	case WM_T_82541_2:
   4073 	case WM_T_82547:
   4074 	case WM_T_82547_2:
   4075 	case WM_T_82573:
   4076 	case WM_T_82574:
   4077 	case WM_T_82583:
   4078 		/* generic */
   4079 		delay(10*1000);
   4080 		break;
   4081 	case WM_T_80003:
   4082 	case WM_T_82571:
   4083 	case WM_T_82572:
   4084 	case WM_T_82575:
   4085 	case WM_T_82576:
   4086 	case WM_T_82580:
   4087 	case WM_T_I350:
   4088 	case WM_T_I354:
   4089 	case WM_T_I210:
   4090 	case WM_T_I211:
   4091 		if (sc->sc_type == WM_T_82571) {
   4092 			/* Only 82571 shares port 0 */
   4093 			mask = EEMNGCTL_CFGDONE_0;
   4094 		} else
   4095 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4096 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4097 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4098 				break;
   4099 			delay(1000);
   4100 		}
   4101 		if (i >= WM_PHY_CFG_TIMEOUT)
   4102 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4103 				device_xname(sc->sc_dev), __func__));
   4104 		break;
   4105 	case WM_T_ICH8:
   4106 	case WM_T_ICH9:
   4107 	case WM_T_ICH10:
   4108 	case WM_T_PCH:
   4109 	case WM_T_PCH2:
   4110 	case WM_T_PCH_LPT:
   4111 	case WM_T_PCH_SPT:
   4112 	case WM_T_PCH_CNP:
   4113 		delay(10*1000);
   4114 		if (sc->sc_type >= WM_T_ICH10)
   4115 			wm_lan_init_done(sc);
   4116 		else
   4117 			wm_get_auto_rd_done(sc);
   4118 
   4119 		/* Clear PHY Reset Asserted bit */
   4120 		reg = CSR_READ(sc, WMREG_STATUS);
   4121 		if ((reg & STATUS_PHYRA) != 0)
   4122 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4123 		break;
   4124 	default:
   4125 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4126 		    __func__);
   4127 		break;
   4128 	}
   4129 }
   4130 
   4131 int
   4132 wm_phy_post_reset(struct wm_softc *sc)
   4133 {
   4134 	device_t dev = sc->sc_dev;
   4135 	uint16_t reg;
   4136 	int rv = 0;
   4137 
   4138 	/* This function is only for ICH8 and newer. */
   4139 	if (sc->sc_type < WM_T_ICH8)
   4140 		return 0;
   4141 
   4142 	if (wm_phy_resetisblocked(sc)) {
   4143 		/* XXX */
   4144 		device_printf(dev, "PHY is blocked\n");
   4145 		return -1;
   4146 	}
   4147 
   4148 	/* Allow time for h/w to get to quiescent state after reset */
   4149 	delay(10*1000);
   4150 
   4151 	/* Perform any necessary post-reset workarounds */
   4152 	if (sc->sc_type == WM_T_PCH)
   4153 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4154 	else if (sc->sc_type == WM_T_PCH2)
   4155 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4156 	if (rv != 0)
   4157 		return rv;
   4158 
   4159 	/* Clear the host wakeup bit after lcd reset */
   4160 	if (sc->sc_type >= WM_T_PCH) {
   4161 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4162 		reg &= ~BM_WUC_HOST_WU_BIT;
   4163 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4164 	}
   4165 
   4166 	/* Configure the LCD with the extended configuration region in NVM */
   4167 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4168 		return rv;
   4169 
   4170 	/* Configure the LCD with the OEM bits in NVM */
   4171 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4172 
   4173 	if (sc->sc_type == WM_T_PCH2) {
   4174 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4175 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4176 			delay(10 * 1000);
   4177 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4178 		}
   4179 		/* Set EEE LPI Update Timer to 200usec */
   4180 		rv = sc->phy.acquire(sc);
   4181 		if (rv)
   4182 			return rv;
   4183 		rv = wm_write_emi_reg_locked(dev,
   4184 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4185 		sc->phy.release(sc);
   4186 	}
   4187 
   4188 	return rv;
   4189 }
   4190 
   4191 /* Only for PCH and newer */
   4192 static int
   4193 wm_write_smbus_addr(struct wm_softc *sc)
   4194 {
   4195 	uint32_t strap, freq;
   4196 	uint16_t phy_data;
   4197 	int rv;
   4198 
   4199 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4200 		device_xname(sc->sc_dev), __func__));
   4201 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4202 
   4203 	strap = CSR_READ(sc, WMREG_STRAP);
   4204 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4205 
   4206 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4207 	if (rv != 0)
   4208 		return -1;
   4209 
   4210 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4211 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4212 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4213 
   4214 	if (sc->sc_phytype == WMPHY_I217) {
   4215 		/* Restore SMBus frequency */
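		/*
		 * Illustrative note: the STRAP_FREQ field is effectively
		 * 1-based, so freq-- converts it to the two-bit value held
		 * in HV_SMB_ADDR_FREQ_{LOW,HIGH}; a strap value of 0 falls
		 * to the "unsupported frequency" branch below.
		 */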
    4216 		if (freq--) {
   4217 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4218 			    | HV_SMB_ADDR_FREQ_HIGH);
   4219 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4220 			    HV_SMB_ADDR_FREQ_LOW);
   4221 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4222 			    HV_SMB_ADDR_FREQ_HIGH);
   4223 		} else
   4224 			DPRINTF(WM_DEBUG_INIT,
   4225 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4226 				device_xname(sc->sc_dev), __func__));
   4227 	}
   4228 
   4229 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4230 	    phy_data);
   4231 }
   4232 
   4233 static int
   4234 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4235 {
   4236 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4237 	uint16_t phy_page = 0;
   4238 	int rv = 0;
   4239 
   4240 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4241 		device_xname(sc->sc_dev), __func__));
   4242 
   4243 	switch (sc->sc_type) {
   4244 	case WM_T_ICH8:
   4245 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4246 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4247 			return 0;
   4248 
   4249 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4250 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4251 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4252 			break;
   4253 		}
   4254 		/* FALLTHROUGH */
   4255 	case WM_T_PCH:
   4256 	case WM_T_PCH2:
   4257 	case WM_T_PCH_LPT:
   4258 	case WM_T_PCH_SPT:
   4259 	case WM_T_PCH_CNP:
   4260 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4261 		break;
   4262 	default:
   4263 		return 0;
   4264 	}
   4265 
   4266 	if ((rv = sc->phy.acquire(sc)) != 0)
   4267 		return rv;
   4268 
   4269 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4270 	if ((reg & sw_cfg_mask) == 0)
   4271 		goto release;
   4272 
   4273 	/*
   4274 	 * Make sure HW does not configure LCD from PHY extended configuration
   4275 	 * before SW configuration
   4276 	 */
   4277 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4278 	if ((sc->sc_type < WM_T_PCH2)
   4279 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4280 		goto release;
   4281 
   4282 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4283 		device_xname(sc->sc_dev), __func__));
   4284 	/* word_addr is in DWORD */
   4285 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4286 
   4287 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4288 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4289 	if (cnf_size == 0)
   4290 		goto release;
   4291 
   4292 	if (((sc->sc_type == WM_T_PCH)
   4293 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4294 	    || (sc->sc_type > WM_T_PCH)) {
   4295 		/*
   4296 		 * HW configures the SMBus address and LEDs when the OEM and
   4297 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4298 		 * are cleared, SW will configure them instead.
   4299 		 */
   4300 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4301 			device_xname(sc->sc_dev), __func__));
   4302 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4303 			goto release;
   4304 
   4305 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4306 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4307 		    (uint16_t)reg);
   4308 		if (rv != 0)
   4309 			goto release;
   4310 	}
   4311 
   4312 	/* Configure LCD from extended configuration region. */
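	/*
	 * Layout note (derived from the loop below): the region is an
	 * array of (data, address) word pairs, so entry i occupies NVM
	 * words word_addr + i*2 (data) and word_addr + i*2 + 1 (address).
	 */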
   4313 	for (i = 0; i < cnf_size; i++) {
   4314 		uint16_t reg_data, reg_addr;
   4315 
   4316 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4317 			goto release;
   4318 
   4319 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4320 			goto release;
   4321 
   4322 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4323 			phy_page = reg_data;
   4324 
   4325 		reg_addr &= IGPHY_MAXREGADDR;
   4326 		reg_addr |= phy_page;
   4327 
   4328 		KASSERT(sc->phy.writereg_locked != NULL);
   4329 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4330 		    reg_data);
   4331 	}
   4332 
   4333 release:
   4334 	sc->phy.release(sc);
   4335 	return rv;
   4336 }
   4337 
   4338 /*
   4339  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4340  *  @sc:       pointer to the HW structure
    4341  *  @d0_state: true if the device is entering D0; false if entering D3
   4342  *
   4343  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4344  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4345  *  in the NVM determine whether HW should configure LPLU and Gbe Disable.
   4346  */
   4347 int
   4348 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4349 {
   4350 	uint32_t mac_reg;
   4351 	uint16_t oem_reg;
   4352 	int rv;
   4353 
   4354 	if (sc->sc_type < WM_T_PCH)
   4355 		return 0;
   4356 
   4357 	rv = sc->phy.acquire(sc);
   4358 	if (rv != 0)
   4359 		return rv;
   4360 
   4361 	if (sc->sc_type == WM_T_PCH) {
   4362 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4363 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4364 			goto release;
   4365 	}
   4366 
   4367 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4368 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4369 		goto release;
   4370 
   4371 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4372 
   4373 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4374 	if (rv != 0)
   4375 		goto release;
   4376 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4377 
   4378 	if (d0_state) {
   4379 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4380 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4381 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4382 			oem_reg |= HV_OEM_BITS_LPLU;
   4383 	} else {
   4384 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4385 		    != 0)
   4386 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4387 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4388 		    != 0)
   4389 			oem_reg |= HV_OEM_BITS_LPLU;
   4390 	}
   4391 
   4392 	/* Set Restart auto-neg to activate the bits */
   4393 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4394 	    && (wm_phy_resetisblocked(sc) == false))
   4395 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4396 
   4397 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4398 
   4399 release:
   4400 	sc->phy.release(sc);
   4401 
   4402 	return rv;
   4403 }
   4404 
   4405 /* Init hardware bits */
   4406 void
   4407 wm_initialize_hardware_bits(struct wm_softc *sc)
   4408 {
   4409 	uint32_t tarc0, tarc1, reg;
   4410 
   4411 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4412 		device_xname(sc->sc_dev), __func__));
   4413 
   4414 	/* For 82571 variant, 80003 and ICHs */
   4415 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4416 	    || (sc->sc_type >= WM_T_80003)) {
   4417 
   4418 		/* Transmit Descriptor Control 0 */
   4419 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4420 		reg |= TXDCTL_COUNT_DESC;
   4421 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4422 
   4423 		/* Transmit Descriptor Control 1 */
   4424 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4425 		reg |= TXDCTL_COUNT_DESC;
   4426 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4427 
   4428 		/* TARC0 */
   4429 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4430 		switch (sc->sc_type) {
   4431 		case WM_T_82571:
   4432 		case WM_T_82572:
   4433 		case WM_T_82573:
   4434 		case WM_T_82574:
   4435 		case WM_T_82583:
   4436 		case WM_T_80003:
   4437 			/* Clear bits 30..27 */
   4438 			tarc0 &= ~__BITS(30, 27);
   4439 			break;
   4440 		default:
   4441 			break;
   4442 		}
   4443 
   4444 		switch (sc->sc_type) {
   4445 		case WM_T_82571:
   4446 		case WM_T_82572:
   4447 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4448 
   4449 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4450 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4451 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4452 			/* 8257[12] Errata No.7 */
    4453 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4454 
   4455 			/* TARC1 bit 28 */
   4456 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4457 				tarc1 &= ~__BIT(28);
   4458 			else
   4459 				tarc1 |= __BIT(28);
   4460 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4461 
   4462 			/*
   4463 			 * 8257[12] Errata No.13
    4464 			 * Disable Dynamic Clock Gating.
   4465 			 */
   4466 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4467 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4468 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4469 			break;
   4470 		case WM_T_82573:
   4471 		case WM_T_82574:
   4472 		case WM_T_82583:
   4473 			if ((sc->sc_type == WM_T_82574)
   4474 			    || (sc->sc_type == WM_T_82583))
   4475 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4476 
   4477 			/* Extended Device Control */
   4478 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4479 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4480 			reg |= __BIT(22);	/* Set bit 22 */
   4481 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4482 
   4483 			/* Device Control */
   4484 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4485 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4486 
   4487 			/* PCIe Control Register */
   4488 			/*
   4489 			 * 82573 Errata (unknown).
   4490 			 *
   4491 			 * 82574 Errata 25 and 82583 Errata 12
   4492 			 * "Dropped Rx Packets":
    4493 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4494 			 */
   4495 			reg = CSR_READ(sc, WMREG_GCR);
   4496 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4497 			CSR_WRITE(sc, WMREG_GCR, reg);
   4498 
   4499 			if ((sc->sc_type == WM_T_82574)
   4500 			    || (sc->sc_type == WM_T_82583)) {
   4501 				/*
   4502 				 * Document says this bit must be set for
   4503 				 * proper operation.
   4504 				 */
   4505 				reg = CSR_READ(sc, WMREG_GCR);
   4506 				reg |= __BIT(22);
   4507 				CSR_WRITE(sc, WMREG_GCR, reg);
   4508 
    4509 				/*
    4510 				 * Apply a workaround for the hardware
    4511 				 * erratum documented in the errata docs:
    4512 				 * it fixes an issue where unreliable or
    4513 				 * error-prone PCIe completions can occur,
    4514 				 * particularly with ASPM enabled. Without
    4515 				 * the fix, the issue can cause Tx timeouts.
    4516 				 */
   4517 				reg = CSR_READ(sc, WMREG_GCR2);
   4518 				reg |= __BIT(0);
   4519 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4520 			}
   4521 			break;
   4522 		case WM_T_80003:
   4523 			/* TARC0 */
   4524 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4525 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4526 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4527 
   4528 			/* TARC1 bit 28 */
   4529 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4530 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4531 				tarc1 &= ~__BIT(28);
   4532 			else
   4533 				tarc1 |= __BIT(28);
   4534 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4535 			break;
   4536 		case WM_T_ICH8:
   4537 		case WM_T_ICH9:
   4538 		case WM_T_ICH10:
   4539 		case WM_T_PCH:
   4540 		case WM_T_PCH2:
   4541 		case WM_T_PCH_LPT:
   4542 		case WM_T_PCH_SPT:
   4543 		case WM_T_PCH_CNP:
   4544 			/* TARC0 */
   4545 			if (sc->sc_type == WM_T_ICH8) {
   4546 				/* Set TARC0 bits 29 and 28 */
   4547 				tarc0 |= __BITS(29, 28);
   4548 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4549 				tarc0 |= __BIT(29);
   4550 				/*
    4551 				 * Drop bit 28. From Linux.
   4552 				 * See I218/I219 spec update
   4553 				 * "5. Buffer Overrun While the I219 is
   4554 				 * Processing DMA Transactions"
   4555 				 */
   4556 				tarc0 &= ~__BIT(28);
   4557 			}
   4558 			/* Set TARC0 bits 23,24,26,27 */
   4559 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4560 
   4561 			/* CTRL_EXT */
   4562 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4563 			reg |= __BIT(22);	/* Set bit 22 */
   4564 			/*
   4565 			 * Enable PHY low-power state when MAC is at D3
   4566 			 * w/o WoL
   4567 			 */
   4568 			if (sc->sc_type >= WM_T_PCH)
   4569 				reg |= CTRL_EXT_PHYPDEN;
   4570 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4571 
   4572 			/* TARC1 */
   4573 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4574 			/* bit 28 */
   4575 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4576 				tarc1 &= ~__BIT(28);
   4577 			else
   4578 				tarc1 |= __BIT(28);
   4579 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4580 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4581 
   4582 			/* Device Status */
   4583 			if (sc->sc_type == WM_T_ICH8) {
   4584 				reg = CSR_READ(sc, WMREG_STATUS);
   4585 				reg &= ~__BIT(31);
   4586 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4587 
   4588 			}
   4589 
   4590 			/* IOSFPC */
   4591 			if (sc->sc_type == WM_T_PCH_SPT) {
   4592 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4593 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4594 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4595 			}
   4596 			/*
   4597 			 * Work-around descriptor data corruption issue during
   4598 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4599 			 * capability.
   4600 			 */
   4601 			reg = CSR_READ(sc, WMREG_RFCTL);
   4602 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4603 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4604 			break;
   4605 		default:
   4606 			break;
   4607 		}
   4608 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4609 
   4610 		switch (sc->sc_type) {
   4611 		/*
   4612 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4613 		 * Avoid RSS Hash Value bug.
   4614 		 */
   4615 		case WM_T_82571:
   4616 		case WM_T_82572:
   4617 		case WM_T_82573:
   4618 		case WM_T_80003:
   4619 		case WM_T_ICH8:
   4620 			reg = CSR_READ(sc, WMREG_RFCTL);
   4621 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4622 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4623 			break;
   4624 		case WM_T_82574:
    4625 			/* Use extended Rx descriptor. */
   4626 			reg = CSR_READ(sc, WMREG_RFCTL);
   4627 			reg |= WMREG_RFCTL_EXSTEN;
   4628 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4629 			break;
   4630 		default:
   4631 			break;
   4632 		}
   4633 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4634 		/*
   4635 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4636 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4637 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4638 		 * Correctly by the Device"
   4639 		 *
   4640 		 * I354(C2000) Errata AVR53:
   4641 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4642 		 * Hang"
   4643 		 */
   4644 		reg = CSR_READ(sc, WMREG_RFCTL);
   4645 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4646 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4647 	}
   4648 }
   4649 
   4650 static uint32_t
   4651 wm_rxpbs_adjust_82580(uint32_t val)
   4652 {
   4653 	uint32_t rv = 0;
   4654 
   4655 	if (val < __arraycount(wm_82580_rxpbs_table))
   4656 		rv = wm_82580_rxpbs_table[val];
   4657 
   4658 	return rv;
   4659 }
   4660 
   4661 /*
   4662  * wm_reset_phy:
   4663  *
    4664  *	Generic PHY reset function.
   4665  *	Same as e1000_phy_hw_reset_generic()
   4666  */
   4667 static int
   4668 wm_reset_phy(struct wm_softc *sc)
   4669 {
   4670 	uint32_t reg;
   4671 
   4672 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4673 		device_xname(sc->sc_dev), __func__));
   4674 	if (wm_phy_resetisblocked(sc))
   4675 		return -1;
   4676 
   4677 	sc->phy.acquire(sc);
   4678 
   4679 	reg = CSR_READ(sc, WMREG_CTRL);
   4680 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4681 	CSR_WRITE_FLUSH(sc);
   4682 
   4683 	delay(sc->phy.reset_delay_us);
   4684 
   4685 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4686 	CSR_WRITE_FLUSH(sc);
   4687 
   4688 	delay(150);
   4689 
   4690 	sc->phy.release(sc);
   4691 
   4692 	wm_get_cfg_done(sc);
   4693 	wm_phy_post_reset(sc);
   4694 
   4695 	return 0;
   4696 }
   4697 
   4698 /*
    4699  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4700  * so it is enough to check sc->sc_queue[0].
   4701  */
   4702 static void
   4703 wm_flush_desc_rings(struct wm_softc *sc)
   4704 {
   4705 	pcireg_t preg;
   4706 	uint32_t reg;
   4707 	struct wm_txqueue *txq;
   4708 	wiseman_txdesc_t *txd;
   4709 	int nexttx;
   4710 	uint32_t rctl;
   4711 
   4712 	/* First, disable MULR fix in FEXTNVM11 */
   4713 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4714 	reg |= FEXTNVM11_DIS_MULRFIX;
   4715 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4716 
   4717 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4718 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4719 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4720 		return;
   4721 
   4722 	/* TX */
   4723 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4724 	    preg, reg);
   4725 	reg = CSR_READ(sc, WMREG_TCTL);
   4726 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4727 
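	/*
	 * Descriptive note: queue a single dummy 512-byte descriptor with
	 * only IFCS set and bump TDT, so that the hardware can consume it
	 * and complete any DMA transaction pending in the Tx ring.
	 */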
   4728 	txq = &sc->sc_queue[0].wmq_txq;
   4729 	nexttx = txq->txq_next;
   4730 	txd = &txq->txq_descs[nexttx];
   4731 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4732 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4733 	txd->wtx_fields.wtxu_status = 0;
   4734 	txd->wtx_fields.wtxu_options = 0;
   4735 	txd->wtx_fields.wtxu_vlan = 0;
   4736 
   4737 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4738 	    BUS_SPACE_BARRIER_WRITE);
   4739 
   4740 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4741 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4742 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4743 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4744 	delay(250);
   4745 
   4746 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4747 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4748 		return;
   4749 
   4750 	/* RX */
   4751 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4752 	rctl = CSR_READ(sc, WMREG_RCTL);
   4753 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4754 	CSR_WRITE_FLUSH(sc);
   4755 	delay(150);
   4756 
   4757 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4758 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4759 	reg &= 0xffffc000;
   4760 	/*
   4761 	 * Update thresholds: prefetch threshold to 31, host threshold
   4762 	 * to 1 and make sure the granularity is "descriptors" and not
   4763 	 * "cache lines"
   4764 	 */
   4765 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
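	/*
	 * Illustrative decode of the literals above: 0x1f sets the prefetch
	 * threshold to 31, (1 << 8) sets the host threshold to 1, and
	 * RXDCTL_GRAN selects descriptor (not cache-line) granularity.
	 */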
   4766 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4767 
   4768 	/* Momentarily enable the RX ring for the changes to take effect */
   4769 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4770 	CSR_WRITE_FLUSH(sc);
   4771 	delay(150);
   4772 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4773 }
   4774 
   4775 /*
   4776  * wm_reset:
   4777  *
   4778  *	Reset the i82542 chip.
   4779  */
   4780 static void
   4781 wm_reset(struct wm_softc *sc)
   4782 {
   4783 	int phy_reset = 0;
   4784 	int i, error = 0;
   4785 	uint32_t reg;
   4786 	uint16_t kmreg;
   4787 	int rv;
   4788 
   4789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4790 		device_xname(sc->sc_dev), __func__));
   4791 	KASSERT(sc->sc_type != 0);
   4792 
   4793 	/*
   4794 	 * Allocate on-chip memory according to the MTU size.
   4795 	 * The Packet Buffer Allocation register must be written
   4796 	 * before the chip is reset.
   4797 	 */
   4798 	switch (sc->sc_type) {
   4799 	case WM_T_82547:
   4800 	case WM_T_82547_2:
   4801 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4802 		    PBA_22K : PBA_30K;
   4803 		for (i = 0; i < sc->sc_nqueues; i++) {
   4804 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4805 			txq->txq_fifo_head = 0;
   4806 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4807 			txq->txq_fifo_size =
   4808 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4809 			txq->txq_fifo_stall = 0;
   4810 		}
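		/*
		 * Illustrative split, assuming the 40KB packet buffer
		 * implied by PBA_40K above: with a default MTU, sc_pba is
		 * PBA_30K, leaving 40K - 30K = 10KB of packet buffer for
		 * the Tx FIFO bookkeeping set up here.
		 */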
   4811 		break;
   4812 	case WM_T_82571:
   4813 	case WM_T_82572:
    4814 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4815 	case WM_T_80003:
   4816 		sc->sc_pba = PBA_32K;
   4817 		break;
   4818 	case WM_T_82573:
   4819 		sc->sc_pba = PBA_12K;
   4820 		break;
   4821 	case WM_T_82574:
   4822 	case WM_T_82583:
   4823 		sc->sc_pba = PBA_20K;
   4824 		break;
   4825 	case WM_T_82576:
   4826 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4827 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4828 		break;
   4829 	case WM_T_82580:
   4830 	case WM_T_I350:
   4831 	case WM_T_I354:
   4832 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4833 		break;
   4834 	case WM_T_I210:
   4835 	case WM_T_I211:
   4836 		sc->sc_pba = PBA_34K;
   4837 		break;
   4838 	case WM_T_ICH8:
   4839 		/* Workaround for a bit corruption issue in FIFO memory */
   4840 		sc->sc_pba = PBA_8K;
   4841 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4842 		break;
   4843 	case WM_T_ICH9:
   4844 	case WM_T_ICH10:
   4845 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4846 		    PBA_14K : PBA_10K;
   4847 		break;
   4848 	case WM_T_PCH:
   4849 	case WM_T_PCH2:	/* XXX 14K? */
   4850 	case WM_T_PCH_LPT:
   4851 	case WM_T_PCH_SPT:
   4852 	case WM_T_PCH_CNP:
   4853 		sc->sc_pba = PBA_26K;
   4854 		break;
   4855 	default:
   4856 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4857 		    PBA_40K : PBA_48K;
   4858 		break;
   4859 	}
   4860 	/*
    4861 	 * Only old or non-multiqueue devices have the PBA register.
   4862 	 * XXX Need special handling for 82575.
   4863 	 */
   4864 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4865 	    || (sc->sc_type == WM_T_82575))
   4866 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4867 
   4868 	/* Prevent the PCI-E bus from sticking */
   4869 	if (sc->sc_flags & WM_F_PCIE) {
   4870 		int timeout = 800;
   4871 
   4872 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4873 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4874 
   4875 		while (timeout--) {
   4876 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4877 			    == 0)
   4878 				break;
   4879 			delay(100);
   4880 		}
   4881 		if (timeout == 0)
   4882 			device_printf(sc->sc_dev,
   4883 			    "failed to disable busmastering\n");
   4884 	}
   4885 
   4886 	/* Set the completion timeout for interface */
   4887 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4888 	    || (sc->sc_type == WM_T_82580)
   4889 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4890 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4891 		wm_set_pcie_completion_timeout(sc);
   4892 
   4893 	/* Clear interrupt */
   4894 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4895 	if (wm_is_using_msix(sc)) {
   4896 		if (sc->sc_type != WM_T_82574) {
   4897 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4898 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4899 		} else
   4900 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4901 	}
   4902 
   4903 	/* Stop the transmit and receive processes. */
   4904 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4905 	sc->sc_rctl &= ~RCTL_EN;
   4906 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4907 	CSR_WRITE_FLUSH(sc);
   4908 
   4909 	/* XXX set_tbi_sbp_82543() */
   4910 
   4911 	delay(10*1000);
   4912 
   4913 	/* Must acquire the MDIO ownership before MAC reset */
   4914 	switch (sc->sc_type) {
   4915 	case WM_T_82573:
   4916 	case WM_T_82574:
   4917 	case WM_T_82583:
   4918 		error = wm_get_hw_semaphore_82573(sc);
   4919 		break;
   4920 	default:
   4921 		break;
   4922 	}
   4923 
   4924 	/*
   4925 	 * 82541 Errata 29? & 82547 Errata 28?
    4926 	 * See also the description of the PHY_RST bit in the CTRL register
   4927 	 * in 8254x_GBe_SDM.pdf.
   4928 	 */
   4929 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4930 		CSR_WRITE(sc, WMREG_CTRL,
   4931 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4932 		CSR_WRITE_FLUSH(sc);
   4933 		delay(5000);
   4934 	}
   4935 
   4936 	switch (sc->sc_type) {
   4937 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4938 	case WM_T_82541:
   4939 	case WM_T_82541_2:
   4940 	case WM_T_82547:
   4941 	case WM_T_82547_2:
   4942 		/*
   4943 		 * On some chipsets, a reset through a memory-mapped write
   4944 		 * cycle can cause the chip to reset before completing the
   4945 		 * write cycle. This causes major headache that can be avoided
   4946 		 * by issuing the reset via indirect register writes through
   4947 		 * I/O space.
   4948 		 *
   4949 		 * So, if we successfully mapped the I/O BAR at attach time,
   4950 		 * use that. Otherwise, try our luck with a memory-mapped
   4951 		 * reset.
   4952 		 */
   4953 		if (sc->sc_flags & WM_F_IOH_VALID)
   4954 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4955 		else
   4956 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4957 		break;
   4958 	case WM_T_82545_3:
   4959 	case WM_T_82546_3:
   4960 		/* Use the shadow control register on these chips. */
   4961 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4962 		break;
   4963 	case WM_T_80003:
   4964 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4965 		sc->phy.acquire(sc);
   4966 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4967 		sc->phy.release(sc);
   4968 		break;
   4969 	case WM_T_ICH8:
   4970 	case WM_T_ICH9:
   4971 	case WM_T_ICH10:
   4972 	case WM_T_PCH:
   4973 	case WM_T_PCH2:
   4974 	case WM_T_PCH_LPT:
   4975 	case WM_T_PCH_SPT:
   4976 	case WM_T_PCH_CNP:
   4977 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4978 		if (wm_phy_resetisblocked(sc) == false) {
   4979 			/*
   4980 			 * Gate automatic PHY configuration by hardware on
   4981 			 * non-managed 82579
   4982 			 */
   4983 			if ((sc->sc_type == WM_T_PCH2)
   4984 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4985 				== 0))
   4986 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4987 
   4988 			reg |= CTRL_PHY_RESET;
   4989 			phy_reset = 1;
   4990 		} else
   4991 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4992 		sc->phy.acquire(sc);
   4993 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4994 		/* Don't insert a completion barrier when reset */
   4995 		delay(20*1000);
   4996 		mutex_exit(sc->sc_ich_phymtx);
   4997 		break;
   4998 	case WM_T_82580:
   4999 	case WM_T_I350:
   5000 	case WM_T_I354:
   5001 	case WM_T_I210:
   5002 	case WM_T_I211:
   5003 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5004 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5005 			CSR_WRITE_FLUSH(sc);
   5006 		delay(5000);
   5007 		break;
   5008 	case WM_T_82542_2_0:
   5009 	case WM_T_82542_2_1:
   5010 	case WM_T_82543:
   5011 	case WM_T_82540:
   5012 	case WM_T_82545:
   5013 	case WM_T_82546:
   5014 	case WM_T_82571:
   5015 	case WM_T_82572:
   5016 	case WM_T_82573:
   5017 	case WM_T_82574:
   5018 	case WM_T_82575:
   5019 	case WM_T_82576:
   5020 	case WM_T_82583:
   5021 	default:
   5022 		/* Everything else can safely use the documented method. */
   5023 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5024 		break;
   5025 	}
   5026 
   5027 	/* Must release the MDIO ownership after MAC reset */
   5028 	switch (sc->sc_type) {
   5029 	case WM_T_82573:
   5030 	case WM_T_82574:
   5031 	case WM_T_82583:
   5032 		if (error == 0)
   5033 			wm_put_hw_semaphore_82573(sc);
   5034 		break;
   5035 	default:
   5036 		break;
   5037 	}
   5038 
   5039 	/* Set Phy Config Counter to 50msec */
   5040 	if (sc->sc_type == WM_T_PCH2) {
   5041 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5042 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5043 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5044 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5045 	}
   5046 
   5047 	if (phy_reset != 0)
   5048 		wm_get_cfg_done(sc);
   5049 
   5050 	/* Reload EEPROM */
   5051 	switch (sc->sc_type) {
   5052 	case WM_T_82542_2_0:
   5053 	case WM_T_82542_2_1:
   5054 	case WM_T_82543:
   5055 	case WM_T_82544:
   5056 		delay(10);
   5057 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5058 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5059 		CSR_WRITE_FLUSH(sc);
   5060 		delay(2000);
   5061 		break;
   5062 	case WM_T_82540:
   5063 	case WM_T_82545:
   5064 	case WM_T_82545_3:
   5065 	case WM_T_82546:
   5066 	case WM_T_82546_3:
   5067 		delay(5*1000);
   5068 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5069 		break;
   5070 	case WM_T_82541:
   5071 	case WM_T_82541_2:
   5072 	case WM_T_82547:
   5073 	case WM_T_82547_2:
   5074 		delay(20000);
   5075 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5076 		break;
   5077 	case WM_T_82571:
   5078 	case WM_T_82572:
   5079 	case WM_T_82573:
   5080 	case WM_T_82574:
   5081 	case WM_T_82583:
   5082 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5083 			delay(10);
   5084 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5085 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5086 			CSR_WRITE_FLUSH(sc);
   5087 		}
   5088 		/* check EECD_EE_AUTORD */
   5089 		wm_get_auto_rd_done(sc);
   5090 		/*
    5091 		 * PHY configuration from the NVM starts just after
    5092 		 * EECD_AUTO_RD is set.
   5093 		 */
   5094 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5095 		    || (sc->sc_type == WM_T_82583))
   5096 			delay(25*1000);
   5097 		break;
   5098 	case WM_T_82575:
   5099 	case WM_T_82576:
   5100 	case WM_T_82580:
   5101 	case WM_T_I350:
   5102 	case WM_T_I354:
   5103 	case WM_T_I210:
   5104 	case WM_T_I211:
   5105 	case WM_T_80003:
   5106 		/* check EECD_EE_AUTORD */
   5107 		wm_get_auto_rd_done(sc);
   5108 		break;
   5109 	case WM_T_ICH8:
   5110 	case WM_T_ICH9:
   5111 	case WM_T_ICH10:
   5112 	case WM_T_PCH:
   5113 	case WM_T_PCH2:
   5114 	case WM_T_PCH_LPT:
   5115 	case WM_T_PCH_SPT:
   5116 	case WM_T_PCH_CNP:
   5117 		break;
   5118 	default:
   5119 		panic("%s: unknown type\n", __func__);
   5120 	}
   5121 
   5122 	/* Check whether EEPROM is present or not */
   5123 	switch (sc->sc_type) {
   5124 	case WM_T_82575:
   5125 	case WM_T_82576:
   5126 	case WM_T_82580:
   5127 	case WM_T_I350:
   5128 	case WM_T_I354:
   5129 	case WM_T_ICH8:
   5130 	case WM_T_ICH9:
   5131 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5132 			/* Not found */
   5133 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5134 			if (sc->sc_type == WM_T_82575)
   5135 				wm_reset_init_script_82575(sc);
   5136 		}
   5137 		break;
   5138 	default:
   5139 		break;
   5140 	}
   5141 
   5142 	if (phy_reset != 0)
   5143 		wm_phy_post_reset(sc);
   5144 
   5145 	if ((sc->sc_type == WM_T_82580)
   5146 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5147 		/* Clear global device reset status bit */
   5148 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5149 	}
   5150 
   5151 	/* Clear any pending interrupt events. */
   5152 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5153 	reg = CSR_READ(sc, WMREG_ICR);
   5154 	if (wm_is_using_msix(sc)) {
   5155 		if (sc->sc_type != WM_T_82574) {
   5156 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5157 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5158 		} else
   5159 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5160 	}
   5161 
   5162 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5163 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5164 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5165 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5166 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5167 		reg |= KABGTXD_BGSQLBIAS;
   5168 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5169 	}
   5170 
   5171 	/* Reload sc_ctrl */
   5172 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5173 
   5174 	wm_set_eee(sc);
   5175 
   5176 	/*
   5177 	 * For PCH, this write will make sure that any noise will be detected
   5178 	 * as a CRC error and be dropped rather than show up as a bad packet
   5179 	 * to the DMA engine
   5180 	 */
   5181 	if (sc->sc_type == WM_T_PCH)
   5182 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5183 
   5184 	if (sc->sc_type >= WM_T_82544)
   5185 		CSR_WRITE(sc, WMREG_WUC, 0);
   5186 
   5187 	if (sc->sc_type < WM_T_82575)
   5188 		wm_disable_aspm(sc); /* Workaround for some chips */
   5189 
   5190 	wm_reset_mdicnfg_82580(sc);
   5191 
   5192 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5193 		wm_pll_workaround_i210(sc);
   5194 
   5195 	if (sc->sc_type == WM_T_80003) {
   5196 		/* Default to TRUE to enable the MDIC W/A */
   5197 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5198 
   5199 		rv = wm_kmrn_readreg(sc,
   5200 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5201 		if (rv == 0) {
   5202 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5203 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5204 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5205 			else
   5206 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5207 		}
   5208 	}
   5209 }
   5210 
   5211 /*
   5212  * wm_add_rxbuf:
   5213  *
    5214  *	Add a receive buffer to the indicated descriptor.
   5215  */
   5216 static int
   5217 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5218 {
   5219 	struct wm_softc *sc = rxq->rxq_sc;
   5220 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5221 	struct mbuf *m;
   5222 	int error;
   5223 
   5224 	KASSERT(mutex_owned(rxq->rxq_lock));
   5225 
   5226 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5227 	if (m == NULL)
   5228 		return ENOBUFS;
   5229 
   5230 	MCLGET(m, M_DONTWAIT);
   5231 	if ((m->m_flags & M_EXT) == 0) {
   5232 		m_freem(m);
   5233 		return ENOBUFS;
   5234 	}
   5235 
   5236 	if (rxs->rxs_mbuf != NULL)
   5237 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5238 
   5239 	rxs->rxs_mbuf = m;
   5240 
   5241 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5242 	/*
   5243 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5244 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5245 	 */
   5246 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5247 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5248 	if (error) {
   5249 		/* XXX XXX XXX */
   5250 		aprint_error_dev(sc->sc_dev,
   5251 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5252 		panic("wm_add_rxbuf");
   5253 	}
   5254 
   5255 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5256 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5257 
   5258 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5259 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5260 			wm_init_rxdesc(rxq, idx);
   5261 	} else
   5262 		wm_init_rxdesc(rxq, idx);
   5263 
   5264 	return 0;
   5265 }
   5266 
   5267 /*
   5268  * wm_rxdrain:
   5269  *
   5270  *	Drain the receive queue.
   5271  */
   5272 static void
   5273 wm_rxdrain(struct wm_rxqueue *rxq)
   5274 {
   5275 	struct wm_softc *sc = rxq->rxq_sc;
   5276 	struct wm_rxsoft *rxs;
   5277 	int i;
   5278 
   5279 	KASSERT(mutex_owned(rxq->rxq_lock));
   5280 
   5281 	for (i = 0; i < WM_NRXDESC; i++) {
   5282 		rxs = &rxq->rxq_soft[i];
   5283 		if (rxs->rxs_mbuf != NULL) {
   5284 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5285 			m_freem(rxs->rxs_mbuf);
   5286 			rxs->rxs_mbuf = NULL;
   5287 		}
   5288 	}
   5289 }
   5290 
   5291 /*
   5292  * Setup registers for RSS.
   5293  *
   5294  * XXX not yet VMDq support
   5295  */
   5296 static void
   5297 wm_init_rss(struct wm_softc *sc)
   5298 {
   5299 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5300 	int i;
   5301 
   5302 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5303 
   5304 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5305 		unsigned int qid, reta_ent;
   5306 
   5307 		qid  = i % sc->sc_nqueues;
   5308 		switch (sc->sc_type) {
   5309 		case WM_T_82574:
   5310 			reta_ent = __SHIFTIN(qid,
   5311 			    RETA_ENT_QINDEX_MASK_82574);
   5312 			break;
   5313 		case WM_T_82575:
   5314 			reta_ent = __SHIFTIN(qid,
   5315 			    RETA_ENT_QINDEX1_MASK_82575);
   5316 			break;
   5317 		default:
   5318 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5319 			break;
   5320 		}
   5321 
   5322 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5323 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5324 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5325 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5326 	}
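	/*
	 * Illustrative mapping: with sc_nqueues == 4, the loop above fills
	 * the redirection table round-robin (0, 1, 2, 3, 0, 1, ...), so a
	 * packet whose RSS hash selects table entry i is steered to queue
	 * (i % 4).
	 */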
   5327 
   5328 	rss_getkey((uint8_t *)rss_key);
   5329 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5330 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5331 
   5332 	if (sc->sc_type == WM_T_82574)
   5333 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5334 	else
   5335 		mrqc = MRQC_ENABLE_RSS_MQ;
   5336 
   5337 	/*
   5338 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5339 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5340 	 */
   5341 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5342 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5343 #if 0
   5344 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5345 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5346 #endif
   5347 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5348 
   5349 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5350 }
   5351 
   5352 /*
    5353  * Adjust the TX and RX queue numbers which the system actually uses.
    5354  *
    5355  * The numbers are affected by the following parameters:
    5356  *     - The number of hardware queues
   5357  *     - The number of MSI-X vectors (= "nvectors" argument)
   5358  *     - ncpu
   5359  */
   5360 static void
   5361 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5362 {
   5363 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5364 
   5365 	if (nvectors < 2) {
   5366 		sc->sc_nqueues = 1;
   5367 		return;
   5368 	}
   5369 
   5370 	switch (sc->sc_type) {
   5371 	case WM_T_82572:
   5372 		hw_ntxqueues = 2;
   5373 		hw_nrxqueues = 2;
   5374 		break;
   5375 	case WM_T_82574:
   5376 		hw_ntxqueues = 2;
   5377 		hw_nrxqueues = 2;
   5378 		break;
   5379 	case WM_T_82575:
   5380 		hw_ntxqueues = 4;
   5381 		hw_nrxqueues = 4;
   5382 		break;
   5383 	case WM_T_82576:
   5384 		hw_ntxqueues = 16;
   5385 		hw_nrxqueues = 16;
   5386 		break;
   5387 	case WM_T_82580:
   5388 	case WM_T_I350:
   5389 	case WM_T_I354:
   5390 		hw_ntxqueues = 8;
   5391 		hw_nrxqueues = 8;
   5392 		break;
   5393 	case WM_T_I210:
   5394 		hw_ntxqueues = 4;
   5395 		hw_nrxqueues = 4;
   5396 		break;
   5397 	case WM_T_I211:
   5398 		hw_ntxqueues = 2;
   5399 		hw_nrxqueues = 2;
   5400 		break;
   5401 		/*
    5402 		 * As the following ethernet controllers do not support
    5403 		 * MSI-X, this driver does not use multiqueue on them:
   5404 		 *     - WM_T_80003
   5405 		 *     - WM_T_ICH8
   5406 		 *     - WM_T_ICH9
   5407 		 *     - WM_T_ICH10
   5408 		 *     - WM_T_PCH
   5409 		 *     - WM_T_PCH2
   5410 		 *     - WM_T_PCH_LPT
   5411 		 */
   5412 	default:
   5413 		hw_ntxqueues = 1;
   5414 		hw_nrxqueues = 1;
   5415 		break;
   5416 	}
   5417 
   5418 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5419 
   5420 	/*
    5421 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5422 	 * we limit the number of queues actually used.
   5423 	 */
   5424 	if (nvectors < hw_nqueues + 1)
   5425 		sc->sc_nqueues = nvectors - 1;
   5426 	else
   5427 		sc->sc_nqueues = hw_nqueues;
   5428 
   5429 	/*
    5430 	 * As using more queues than CPUs cannot improve scaling, we limit
    5431 	 * the number of queues actually used.
   5432 	 */
   5433 	if (ncpu < sc->sc_nqueues)
   5434 		sc->sc_nqueues = ncpu;
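	/*
	 * Worked example (hypothetical numbers): an 82576 (16 hardware
	 * queues) given nvectors = 5 on a 4-CPU machine first gets
	 * sc_nqueues = 5 - 1 = 4 from the vector limit above, and the
	 * CPU limit then leaves it at 4.
	 */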
   5435 }
   5436 
   5437 static inline bool
   5438 wm_is_using_msix(struct wm_softc *sc)
   5439 {
   5440 
   5441 	return (sc->sc_nintrs > 1);
   5442 }
   5443 
   5444 static inline bool
   5445 wm_is_using_multiqueue(struct wm_softc *sc)
   5446 {
   5447 
   5448 	return (sc->sc_nqueues > 1);
   5449 }
   5450 
   5451 static int
   5452 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5453 {
   5454 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5455 
   5456 	wmq->wmq_id = qidx;
   5457 	wmq->wmq_intr_idx = intr_idx;
   5458 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5459 	    wm_handle_queue, wmq);
   5460 	if (wmq->wmq_si != NULL)
   5461 		return 0;
   5462 
   5463 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5464 	    wmq->wmq_id);
   5465 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5466 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5467 	return ENOMEM;
   5468 }
   5469 
   5470 /*
   5471  * Both single interrupt MSI and INTx can use this function.
   5472  */
   5473 static int
   5474 wm_setup_legacy(struct wm_softc *sc)
   5475 {
   5476 	pci_chipset_tag_t pc = sc->sc_pc;
   5477 	const char *intrstr = NULL;
   5478 	char intrbuf[PCI_INTRSTR_LEN];
   5479 	int error;
   5480 
   5481 	error = wm_alloc_txrx_queues(sc);
   5482 	if (error) {
   5483 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5484 		    error);
   5485 		return ENOMEM;
   5486 	}
   5487 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5488 	    sizeof(intrbuf));
   5489 #ifdef WM_MPSAFE
   5490 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5491 #endif
   5492 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5493 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5494 	if (sc->sc_ihs[0] == NULL) {
   5495 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5496 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5497 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5498 		return ENOMEM;
   5499 	}
   5500 
   5501 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5502 	sc->sc_nintrs = 1;
   5503 
   5504 	return wm_softint_establish_queue(sc, 0, 0);
   5505 }
   5506 
   5507 static int
   5508 wm_setup_msix(struct wm_softc *sc)
   5509 {
   5510 	void *vih;
   5511 	kcpuset_t *affinity;
   5512 	int qidx, error, intr_idx, txrx_established;
   5513 	pci_chipset_tag_t pc = sc->sc_pc;
   5514 	const char *intrstr = NULL;
   5515 	char intrbuf[PCI_INTRSTR_LEN];
   5516 	char intr_xname[INTRDEVNAMEBUF];
   5517 
   5518 	if (sc->sc_nqueues < ncpu) {
   5519 		/*
    5520 		 * To avoid conflicts with other devices' interrupts, the
    5521 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   5522 		 */
   5523 		sc->sc_affinity_offset = 1;
   5524 	} else {
   5525 		/*
    5526 		 * In this case, the device uses all CPUs. For readability, we
    5527 		 * unify the affinity cpu_index with the MSI-X vector number.
   5528 		 */
   5529 		sc->sc_affinity_offset = 0;
   5530 	}
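	/*
	 * Illustrative placement: with sc_affinity_offset = 1 and four
	 * Tx/Rx vectors on an 8-CPU system, the round-robin computation
	 * below pins TXRX0..TXRX3 to CPU#1..CPU#4.
	 */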
   5531 
   5532 	error = wm_alloc_txrx_queues(sc);
   5533 	if (error) {
   5534 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5535 		    error);
   5536 		return ENOMEM;
   5537 	}
   5538 
   5539 	kcpuset_create(&affinity, false);
   5540 	intr_idx = 0;
   5541 
   5542 	/*
   5543 	 * TX and RX
   5544 	 */
   5545 	txrx_established = 0;
   5546 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5547 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5548 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5549 
   5550 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5551 		    sizeof(intrbuf));
   5552 #ifdef WM_MPSAFE
   5553 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5554 		    PCI_INTR_MPSAFE, true);
   5555 #endif
   5556 		memset(intr_xname, 0, sizeof(intr_xname));
   5557 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5558 		    device_xname(sc->sc_dev), qidx);
   5559 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5560 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5561 		if (vih == NULL) {
   5562 			aprint_error_dev(sc->sc_dev,
   5563 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5564 			    intrstr ? " at " : "",
   5565 			    intrstr ? intrstr : "");
   5566 
   5567 			goto fail;
   5568 		}
   5569 		kcpuset_zero(affinity);
   5570 		/* Round-robin affinity */
   5571 		kcpuset_set(affinity, affinity_to);
   5572 		error = interrupt_distribute(vih, affinity, NULL);
   5573 		if (error == 0) {
   5574 			aprint_normal_dev(sc->sc_dev,
   5575 			    "for TX and RX interrupting at %s affinity to %u\n",
   5576 			    intrstr, affinity_to);
   5577 		} else {
   5578 			aprint_normal_dev(sc->sc_dev,
   5579 			    "for TX and RX interrupting at %s\n", intrstr);
   5580 		}
   5581 		sc->sc_ihs[intr_idx] = vih;
   5582 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5583 			goto fail;
   5584 		txrx_established++;
   5585 		intr_idx++;
   5586 	}
   5587 
   5588 	/* LINK */
   5589 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5590 	    sizeof(intrbuf));
   5591 #ifdef WM_MPSAFE
   5592 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5593 #endif
   5594 	memset(intr_xname, 0, sizeof(intr_xname));
   5595 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5596 	    device_xname(sc->sc_dev));
   5597 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5598 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5599 	if (vih == NULL) {
   5600 		aprint_error_dev(sc->sc_dev,
   5601 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5602 		    intrstr ? " at " : "",
   5603 		    intrstr ? intrstr : "");
   5604 
   5605 		goto fail;
   5606 	}
   5607 	/* Keep default affinity to LINK interrupt */
   5608 	aprint_normal_dev(sc->sc_dev,
   5609 	    "for LINK interrupting at %s\n", intrstr);
   5610 	sc->sc_ihs[intr_idx] = vih;
   5611 	sc->sc_link_intr_idx = intr_idx;
   5612 
   5613 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5614 	kcpuset_destroy(affinity);
   5615 	return 0;
   5616 
   5617  fail:
   5618 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5619 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5620 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5621 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5622 	}
   5623 
   5624 	kcpuset_destroy(affinity);
   5625 	return ENOMEM;
   5626 }
   5627 
   5628 static void
   5629 wm_unset_stopping_flags(struct wm_softc *sc)
   5630 {
   5631 	int i;
   5632 
   5633 	KASSERT(WM_CORE_LOCKED(sc));
   5634 
   5635 	/* Must unset stopping flags in ascending order. */
   5636 	for (i = 0; i < sc->sc_nqueues; i++) {
   5637 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5638 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5639 
   5640 		mutex_enter(txq->txq_lock);
   5641 		txq->txq_stopping = false;
   5642 		mutex_exit(txq->txq_lock);
   5643 
   5644 		mutex_enter(rxq->rxq_lock);
   5645 		rxq->rxq_stopping = false;
   5646 		mutex_exit(rxq->rxq_lock);
   5647 	}
   5648 
   5649 	sc->sc_core_stopping = false;
   5650 }
   5651 
   5652 static void
   5653 wm_set_stopping_flags(struct wm_softc *sc)
   5654 {
   5655 	int i;
   5656 
   5657 	KASSERT(WM_CORE_LOCKED(sc));
   5658 
   5659 	sc->sc_core_stopping = true;
   5660 
   5661 	/* Must set stopping flags in ascending order. */
   5662 	for (i = 0; i < sc->sc_nqueues; i++) {
   5663 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5664 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5665 
   5666 		mutex_enter(rxq->rxq_lock);
   5667 		rxq->rxq_stopping = true;
   5668 		mutex_exit(rxq->rxq_lock);
   5669 
   5670 		mutex_enter(txq->txq_lock);
   5671 		txq->txq_stopping = true;
   5672 		mutex_exit(txq->txq_lock);
   5673 	}
   5674 }
   5675 
   5676 /*
   5677  * Write interrupt interval value to ITR or EITR
   5678  */
   5679 static void
   5680 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5681 {
   5682 
   5683 	if (!wmq->wmq_set_itr)
   5684 		return;
   5685 
   5686 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5687 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5688 
   5689 		/*
    5690 		 * The 82575 doesn't have the CNT_INGR field, so
    5691 		 * overwrite the counter field by software.
   5692 		 */
   5693 		if (sc->sc_type == WM_T_82575)
   5694 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5695 		else
   5696 			eitr |= EITR_CNT_INGR;
   5697 
   5698 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5699 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5700 		/*
    5701 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5702 		 * the multiqueue function with MSI-X.
   5703 		 */
   5704 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5705 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5706 	} else {
   5707 		KASSERT(wmq->wmq_id == 0);
   5708 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5709 	}
   5710 
   5711 	wmq->wmq_set_itr = false;
   5712 }
   5713 
   5714 /*
   5715  * TODO
    5716  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5717  * however it does not fit wm(4), so AIM is disabled until we find an
    5718  * appropriate ITR calculation.
   5719  */
   5720 /*
    5721  * Calculate the interrupt interval value that wm_itrs_writereg() will write
    5722  * to the register. This function itself does not write the ITR/EITR register.
   5723  */
   5724 static void
   5725 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5726 {
   5727 #ifdef NOTYET
   5728 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5729 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5730 	uint32_t avg_size = 0;
   5731 	uint32_t new_itr;
   5732 
   5733 	if (rxq->rxq_packets)
   5734 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5735 	if (txq->txq_packets)
   5736 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5737 
   5738 	if (avg_size == 0) {
   5739 		new_itr = 450; /* restore default value */
   5740 		goto out;
   5741 	}
   5742 
   5743 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5744 	avg_size += 24;
   5745 
   5746 	/* Don't starve jumbo frames */
   5747 	avg_size = uimin(avg_size, 3000);
   5748 
   5749 	/* Give a little boost to mid-size frames */
   5750 	if ((avg_size > 300) && (avg_size < 1200))
   5751 		new_itr = avg_size / 3;
   5752 	else
   5753 		new_itr = avg_size / 2;
   5754 
   5755 out:
   5756 	/*
    5757 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5758 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5759 	 */
   5760 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5761 		new_itr *= 4;
   5762 
   5763 	if (new_itr != wmq->wmq_itr) {
   5764 		wmq->wmq_itr = new_itr;
   5765 		wmq->wmq_set_itr = true;
   5766 	} else
   5767 		wmq->wmq_set_itr = false;
   5768 
   5769 	rxq->rxq_packets = 0;
   5770 	rxq->rxq_bytes = 0;
   5771 	txq->txq_packets = 0;
   5772 	txq->txq_bytes = 0;
   5773 #endif
   5774 }
   5775 
   5776 static void
   5777 wm_init_sysctls(struct wm_softc *sc)
   5778 {
   5779 	struct sysctllog **log;
   5780 	const struct sysctlnode *rnode, *cnode;
   5781 	int rv;
   5782 	const char *dvname;
   5783 
   5784 	log = &sc->sc_sysctllog;
   5785 	dvname = device_xname(sc->sc_dev);
   5786 
   5787 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5788 	    0, CTLTYPE_NODE, dvname,
   5789 	    SYSCTL_DESCR("wm information and settings"),
   5790 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5791 	if (rv != 0)
   5792 		goto err;
   5793 
   5794 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5795 	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5796 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5797 	if (rv != 0)
   5798 		goto teardown;
   5799 
   5800 	return;
   5801 
   5802 teardown:
   5803 	sysctl_teardown(log);
   5804 err:
   5805 	sc->sc_sysctllog = NULL;
   5806 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5807 	    __func__, rv);
   5808 }
   5809 
   5810 /*
   5811  * wm_init:		[ifnet interface function]
   5812  *
   5813  *	Initialize the interface.
   5814  */
   5815 static int
   5816 wm_init(struct ifnet *ifp)
   5817 {
   5818 	struct wm_softc *sc = ifp->if_softc;
   5819 	int ret;
   5820 
   5821 	WM_CORE_LOCK(sc);
   5822 	ret = wm_init_locked(ifp);
   5823 	WM_CORE_UNLOCK(sc);
   5824 
   5825 	return ret;
   5826 }
   5827 
   5828 static int
   5829 wm_init_locked(struct ifnet *ifp)
   5830 {
   5831 	struct wm_softc *sc = ifp->if_softc;
   5832 	struct ethercom *ec = &sc->sc_ethercom;
   5833 	int i, j, trynum, error = 0;
   5834 	uint32_t reg, sfp_mask = 0;
   5835 
   5836 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5837 		device_xname(sc->sc_dev), __func__));
   5838 	KASSERT(WM_CORE_LOCKED(sc));
   5839 
   5840 	/*
    5841 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5842 	 * There is a small but measurable benefit to avoiding the adjustment
   5843 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5844 	 * on such platforms.  One possibility is that the DMA itself is
   5845 	 * slightly more efficient if the front of the entire packet (instead
   5846 	 * of the front of the headers) is aligned.
   5847 	 *
   5848 	 * Note we must always set align_tweak to 0 if we are using
   5849 	 * jumbo frames.
   5850 	 */
   5851 #ifdef __NO_STRICT_ALIGNMENT
   5852 	sc->sc_align_tweak = 0;
   5853 #else
   5854 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5855 		sc->sc_align_tweak = 0;
   5856 	else
   5857 		sc->sc_align_tweak = 2;
   5858 #endif /* __NO_STRICT_ALIGNMENT */
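	/*
	 * Illustrative effect: with sc_align_tweak = 2, the 14-byte
	 * Ethernet header ends at buffer offset 2 + 14 = 16, leaving the
	 * IP header 4-byte aligned on strict-alignment platforms.
	 */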
   5859 
   5860 	/* Cancel any pending I/O. */
   5861 	wm_stop_locked(ifp, false, false);
   5862 
   5863 	/* Update statistics before reset */
   5864 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5865 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5866 
   5867 	/* PCH_SPT hardware workaround */
   5868 	if (sc->sc_type == WM_T_PCH_SPT)
   5869 		wm_flush_desc_rings(sc);
   5870 
   5871 	/* Reset the chip to a known state. */
   5872 	wm_reset(sc);
   5873 
   5874 	/*
   5875 	 * AMT based hardware can now take control from firmware
   5876 	 * Do this after reset.
   5877 	 */
   5878 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5879 		wm_get_hw_control(sc);
   5880 
   5881 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5882 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5883 		wm_legacy_irq_quirk_spt(sc);
   5884 
   5885 	/* Init hardware bits */
   5886 	wm_initialize_hardware_bits(sc);
   5887 
   5888 	/* Reset the PHY. */
   5889 	if (sc->sc_flags & WM_F_HAS_MII)
   5890 		wm_gmii_reset(sc);
   5891 
   5892 	if (sc->sc_type >= WM_T_ICH8) {
   5893 		reg = CSR_READ(sc, WMREG_GCR);
   5894 		/*
   5895 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5896 		 * default after reset.
   5897 		 */
   5898 		if (sc->sc_type == WM_T_ICH8)
   5899 			reg |= GCR_NO_SNOOP_ALL;
   5900 		else
   5901 			reg &= ~GCR_NO_SNOOP_ALL;
   5902 		CSR_WRITE(sc, WMREG_GCR, reg);
   5903 	}
   5904 
   5905 	if ((sc->sc_type >= WM_T_ICH8)
   5906 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5907 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5908 
   5909 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5910 		reg |= CTRL_EXT_RO_DIS;
   5911 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5912 	}
   5913 
   5914 	/* Calculate (E)ITR value */
   5915 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5916 		/*
   5917 		 * For NEWQUEUE's EITR (except for 82575).
    5918 		 * The 82575's EITR should be set to the same throttling value
    5919 		 * as other old controllers' ITR because the interrupt/sec
    5920 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   5921 		 *
    5922 		 * The 82574's EITR should be set to the same value as the ITR.
   5923 		 *
   5924 		 * For N interrupts/sec, set this value to:
    5925 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5926 		 */
   5927 		sc->sc_itr_init = 450;
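		/*
		 * Worked example: sc_itr_init = 450 corresponds to roughly
		 * 1,000,000 / 450 = ~2222 interrupts/sec on these EITRs.
		 */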
   5928 	} else if (sc->sc_type >= WM_T_82543) {
   5929 		/*
   5930 		 * Set up the interrupt throttling register (units of 256ns)
   5931 		 * Note that a footnote in Intel's documentation says this
   5932 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5933 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5934 		 * that that is also true for the 1024ns units of the other
   5935 		 * interrupt-related timer registers -- so, really, we ought
   5936 		 * to divide this value by 4 when the link speed is low.
   5937 		 *
   5938 		 * XXX implement this division at link speed change!
   5939 		 */
   5940 
   5941 		/*
   5942 		 * For N interrupts/sec, set this value to:
   5943 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5944 		 * absolute and packet timer values to this value
   5945 		 * divided by 4 to get "simple timer" behavior.
   5946 		 */
   5947 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
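		/*
		 * Worked example: 1500 ticks * 256ns = 384us per throttling
		 * interval, i.e. 1,000,000,000 / (1500 * 256) = ~2604
		 * interrupts/sec, matching the figure noted above.
		 */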
   5948 	}
   5949 
   5950 	error = wm_init_txrx_queues(sc);
   5951 	if (error)
   5952 		goto out;
   5953 
   5954 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5955 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5956 	    (sc->sc_type >= WM_T_82575))
   5957 		wm_serdes_power_up_link_82575(sc);
   5958 
   5959 	/* Clear out the VLAN table -- we don't use it (yet). */
   5960 	CSR_WRITE(sc, WMREG_VET, 0);
   5961 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5962 		trynum = 10; /* Due to hw errata */
   5963 	else
   5964 		trynum = 1;
   5965 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5966 		for (j = 0; j < trynum; j++)
   5967 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5968 
   5969 	/*
   5970 	 * Set up flow-control parameters.
   5971 	 *
   5972 	 * XXX Values could probably stand some tuning.
   5973 	 */
   5974 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5975 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5976 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5977 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5978 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5979 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5980 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5981 	}
   5982 
   5983 	sc->sc_fcrtl = FCRTL_DFLT;
   5984 	if (sc->sc_type < WM_T_82543) {
   5985 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5986 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5987 	} else {
   5988 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5989 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5990 	}
   5991 
   5992 	if (sc->sc_type == WM_T_80003)
   5993 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5994 	else
   5995 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5996 
   5997 	/* Writes the control register. */
   5998 	wm_set_vlan(sc);
   5999 
   6000 	if (sc->sc_flags & WM_F_HAS_MII) {
   6001 		uint16_t kmreg;
   6002 
   6003 		switch (sc->sc_type) {
   6004 		case WM_T_80003:
   6005 		case WM_T_ICH8:
   6006 		case WM_T_ICH9:
   6007 		case WM_T_ICH10:
   6008 		case WM_T_PCH:
   6009 		case WM_T_PCH2:
   6010 		case WM_T_PCH_LPT:
   6011 		case WM_T_PCH_SPT:
   6012 		case WM_T_PCH_CNP:
   6013 			/*
   6014 			 * Set the mac to wait the maximum time between each
   6015 			 * iteration and increase the max iterations when
   6016 			 * polling the phy; this fixes erroneous timeouts at
   6017 			 * 10Mbps.
   6018 			 */
   6019 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6020 			    0xFFFF);
   6021 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6022 			    &kmreg);
   6023 			kmreg |= 0x3F;
   6024 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6025 			    kmreg);
   6026 			break;
   6027 		default:
   6028 			break;
   6029 		}
   6030 
   6031 		if (sc->sc_type == WM_T_80003) {
   6032 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6033 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6034 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6035 
   6036 			/* Bypass RX and TX FIFO's */
   6037 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6038 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6039 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6040 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6041 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6042 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6043 		}
   6044 	}
   6045 #if 0
   6046 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6047 #endif
   6048 
   6049 	/* Set up checksum offload parameters. */
   6050 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6051 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6052 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6053 		reg |= RXCSUM_IPOFL;
   6054 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6055 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6056 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6057 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6058 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6059 
   6060 	/* Set registers about MSI-X */
   6061 	if (wm_is_using_msix(sc)) {
   6062 		uint32_t ivar, qintr_idx;
   6063 		struct wm_queue *wmq;
   6064 		unsigned int qid;
   6065 
   6066 		if (sc->sc_type == WM_T_82575) {
   6067 			/* Interrupt control */
   6068 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6069 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6070 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6071 
   6072 			/* TX and RX */
   6073 			for (i = 0; i < sc->sc_nqueues; i++) {
   6074 				wmq = &sc->sc_queue[i];
   6075 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6076 				    EITR_TX_QUEUE(wmq->wmq_id)
   6077 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6078 			}
   6079 			/* Link status */
   6080 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6081 			    EITR_OTHER);
   6082 		} else if (sc->sc_type == WM_T_82574) {
   6083 			/* Interrupt control */
   6084 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6085 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6086 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6087 
   6088 			/*
   6089 			 * Workaround issue with spurious interrupts
   6090 			 * in MSI-X mode.
    6091 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
    6092 			 * yet initialized, so re-initialize WMREG_RFCTL here.
   6093 			 */
   6094 			reg = CSR_READ(sc, WMREG_RFCTL);
   6095 			reg |= WMREG_RFCTL_ACKDIS;
   6096 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6097 
   6098 			ivar = 0;
   6099 			/* TX and RX */
   6100 			for (i = 0; i < sc->sc_nqueues; i++) {
   6101 				wmq = &sc->sc_queue[i];
   6102 				qid = wmq->wmq_id;
   6103 				qintr_idx = wmq->wmq_intr_idx;
   6104 
   6105 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6106 				    IVAR_TX_MASK_Q_82574(qid));
   6107 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6108 				    IVAR_RX_MASK_Q_82574(qid));
   6109 			}
   6110 			/* Link status */
   6111 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6112 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6113 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6114 		} else {
   6115 			/* Interrupt control */
   6116 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6117 			    | GPIE_EIAME | GPIE_PBA);
   6118 
   6119 			switch (sc->sc_type) {
   6120 			case WM_T_82580:
   6121 			case WM_T_I350:
   6122 			case WM_T_I354:
   6123 			case WM_T_I210:
   6124 			case WM_T_I211:
   6125 				/* TX and RX */
   6126 				for (i = 0; i < sc->sc_nqueues; i++) {
   6127 					wmq = &sc->sc_queue[i];
   6128 					qid = wmq->wmq_id;
   6129 					qintr_idx = wmq->wmq_intr_idx;
   6130 
   6131 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6132 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6133 					ivar |= __SHIFTIN((qintr_idx
   6134 						| IVAR_VALID),
   6135 					    IVAR_TX_MASK_Q(qid));
   6136 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6137 					ivar |= __SHIFTIN((qintr_idx
   6138 						| IVAR_VALID),
   6139 					    IVAR_RX_MASK_Q(qid));
   6140 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6141 				}
   6142 				break;
   6143 			case WM_T_82576:
   6144 				/* TX and RX */
   6145 				for (i = 0; i < sc->sc_nqueues; i++) {
   6146 					wmq = &sc->sc_queue[i];
   6147 					qid = wmq->wmq_id;
   6148 					qintr_idx = wmq->wmq_intr_idx;
   6149 
   6150 					ivar = CSR_READ(sc,
   6151 					    WMREG_IVAR_Q_82576(qid));
   6152 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6153 					ivar |= __SHIFTIN((qintr_idx
   6154 						| IVAR_VALID),
   6155 					    IVAR_TX_MASK_Q_82576(qid));
   6156 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6157 					ivar |= __SHIFTIN((qintr_idx
   6158 						| IVAR_VALID),
   6159 					    IVAR_RX_MASK_Q_82576(qid));
   6160 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6161 					    ivar);
   6162 				}
   6163 				break;
   6164 			default:
   6165 				break;
   6166 			}
   6167 
   6168 			/* Link status */
   6169 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6170 			    IVAR_MISC_OTHER);
   6171 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6172 		}
   6173 
   6174 		if (wm_is_using_multiqueue(sc)) {
   6175 			wm_init_rss(sc);
   6176 
    6177 			/*
    6178 			 * NOTE: Receive full-packet checksum offload
    6179 			 * is mutually exclusive with multiqueue. However,
    6180 			 * this is not the same as TCP/IP checksum offload,
    6181 			 * which still works.
    6182 			 */
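         			/*
         			 * Setting RXCSUM_PCSD disables the full-packet
         			 * checksum so that the descriptor field can carry
         			 * the RSS hash instead.
         			 */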
   6183 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6184 			reg |= RXCSUM_PCSD;
   6185 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6186 		}
   6187 	}
   6188 
   6189 	/* Set up the interrupt registers. */
   6190 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6191 
   6192 	/* Enable SFP module insertion interrupt if it's required */
   6193 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6194 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6195 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6196 		sfp_mask = ICR_GPI(0);
   6197 	}
   6198 
   6199 	if (wm_is_using_msix(sc)) {
   6200 		uint32_t mask;
   6201 		struct wm_queue *wmq;
   6202 
   6203 		switch (sc->sc_type) {
   6204 		case WM_T_82574:
   6205 			mask = 0;
   6206 			for (i = 0; i < sc->sc_nqueues; i++) {
   6207 				wmq = &sc->sc_queue[i];
   6208 				mask |= ICR_TXQ(wmq->wmq_id);
   6209 				mask |= ICR_RXQ(wmq->wmq_id);
   6210 			}
   6211 			mask |= ICR_OTHER;
   6212 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6213 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6214 			break;
   6215 		default:
   6216 			if (sc->sc_type == WM_T_82575) {
   6217 				mask = 0;
   6218 				for (i = 0; i < sc->sc_nqueues; i++) {
   6219 					wmq = &sc->sc_queue[i];
   6220 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6221 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6222 				}
   6223 				mask |= EITR_OTHER;
   6224 			} else {
   6225 				mask = 0;
   6226 				for (i = 0; i < sc->sc_nqueues; i++) {
   6227 					wmq = &sc->sc_queue[i];
   6228 					mask |= 1 << wmq->wmq_intr_idx;
   6229 				}
   6230 				mask |= 1 << sc->sc_link_intr_idx;
   6231 			}
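         			/*
         			 * Roughly: EIAC selects the causes that auto-clear
         			 * when their MSI-X vector fires, EIAM the causes
         			 * that are auto-masked, and EIMS enables the causes
         			 * themselves.
         			 */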
   6232 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6233 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6234 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6235 
   6236 			/* For other interrupts */
   6237 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6238 			break;
   6239 		}
   6240 	} else {
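         		/*
         		 * Legacy interrupts: Tx descriptor write-back, link
         		 * status change, Rx sequence error, Rx descriptor
         		 * minimum threshold, Rx overrun and Rx timer, plus SFP
         		 * insertion if enabled above.
         		 */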
   6241 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6242 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6243 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6244 	}
   6245 
   6246 	/* Set up the inter-packet gap. */
   6247 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6248 
   6249 	if (sc->sc_type >= WM_T_82543) {
   6250 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6251 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6252 			wm_itrs_writereg(sc, wmq);
   6253 		}
    6254 		/*
    6255 		 * Link interrupts occur much less frequently than TX
    6256 		 * and RX interrupts, so we don't tune the
    6257 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
    6258 		 * does.
    6259 		 */
   6260 	}
   6261 
   6262 	/* Set the VLAN ethernetype. */
   6263 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6264 
   6265 	/*
   6266 	 * Set up the transmit control register; we start out with
    6267 	 * a collision distance suitable for FDX, but update it when
   6268 	 * we resolve the media type.
   6269 	 */
   6270 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6271 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6272 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6273 	if (sc->sc_type >= WM_T_82571)
   6274 		sc->sc_tctl |= TCTL_MULR;
   6275 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6276 
   6277 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6278 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   6279 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6280 	}
   6281 
   6282 	if (sc->sc_type == WM_T_80003) {
   6283 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6284 		reg &= ~TCTL_EXT_GCEX_MASK;
   6285 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6286 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6287 	}
   6288 
   6289 	/* Set the media. */
   6290 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6291 		goto out;
   6292 
   6293 	/* Configure for OS presence */
   6294 	wm_init_manageability(sc);
   6295 
   6296 	/*
   6297 	 * Set up the receive control register; we actually program the
   6298 	 * register when we set the receive filter. Use multicast address
   6299 	 * offset type 0.
   6300 	 *
   6301 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6302 	 * don't enable that feature.
   6303 	 */
   6304 	sc->sc_mchash_type = 0;
   6305 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6306 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6307 
    6308 	/* The 82574 uses one-buffer extended Rx descriptors. */
   6309 	if (sc->sc_type == WM_T_82574)
   6310 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6311 
    6312 	/*
    6313 	 * The I350 has a bug where it always strips the CRC whether
    6314 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
    6315 	 */
   6316 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6317 	    || (sc->sc_type == WM_T_I210))
   6318 		sc->sc_rctl |= RCTL_SECRC;
   6319 
   6320 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6321 	    && (ifp->if_mtu > ETHERMTU)) {
   6322 		sc->sc_rctl |= RCTL_LPE;
   6323 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6324 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6325 	}
   6326 
   6327 	if (MCLBYTES == 2048)
   6328 		sc->sc_rctl |= RCTL_2k;
   6329 	else {
   6330 		if (sc->sc_type >= WM_T_82543) {
   6331 			switch (MCLBYTES) {
   6332 			case 4096:
   6333 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6334 				break;
   6335 			case 8192:
   6336 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6337 				break;
   6338 			case 16384:
   6339 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6340 				break;
   6341 			default:
   6342 				panic("wm_init: MCLBYTES %d unsupported",
   6343 				    MCLBYTES);
   6344 				break;
   6345 			}
   6346 		} else
   6347 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6348 	}
   6349 
   6350 	/* Enable ECC */
   6351 	switch (sc->sc_type) {
   6352 	case WM_T_82571:
   6353 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6354 		reg |= PBA_ECC_CORR_EN;
   6355 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6356 		break;
   6357 	case WM_T_PCH_LPT:
   6358 	case WM_T_PCH_SPT:
   6359 	case WM_T_PCH_CNP:
   6360 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6361 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6362 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6363 
   6364 		sc->sc_ctrl |= CTRL_MEHE;
   6365 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6366 		break;
   6367 	default:
   6368 		break;
   6369 	}
   6370 
   6371 	/*
   6372 	 * Set the receive filter.
   6373 	 *
   6374 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6375 	 * the setting of RCTL.EN in wm_set_filter()
   6376 	 */
   6377 	wm_set_filter(sc);
   6378 
    6379 	/* On 82575 and later, set RDT only if RX is enabled */
   6380 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6381 		int qidx;
   6382 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6383 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6384 			for (i = 0; i < WM_NRXDESC; i++) {
   6385 				mutex_enter(rxq->rxq_lock);
   6386 				wm_init_rxdesc(rxq, i);
   6387 				mutex_exit(rxq->rxq_lock);
   6389 			}
   6390 		}
   6391 	}
   6392 
   6393 	wm_unset_stopping_flags(sc);
   6394 
   6395 	/* Start the one second link check clock. */
   6396 	callout_schedule(&sc->sc_tick_ch, hz);
   6397 
   6398 	/* ...all done! */
   6399 	ifp->if_flags |= IFF_RUNNING;
   6400 
   6401  out:
   6402 	/* Save last flags for the callback */
   6403 	sc->sc_if_flags = ifp->if_flags;
   6404 	sc->sc_ec_capenable = ec->ec_capenable;
   6405 	if (error)
   6406 		log(LOG_ERR, "%s: interface not running\n",
   6407 		    device_xname(sc->sc_dev));
   6408 	return error;
   6409 }
   6410 
   6411 /*
   6412  * wm_stop:		[ifnet interface function]
   6413  *
   6414  *	Stop transmission on the interface.
   6415  */
   6416 static void
   6417 wm_stop(struct ifnet *ifp, int disable)
   6418 {
   6419 	struct wm_softc *sc = ifp->if_softc;
   6420 
   6421 	ASSERT_SLEEPABLE();
   6422 
   6423 	WM_CORE_LOCK(sc);
   6424 	wm_stop_locked(ifp, disable ? true : false, true);
   6425 	WM_CORE_UNLOCK(sc);
   6426 
    6427 	/*
    6428 	 * After wm_set_stopping_flags(), it is guaranteed that
    6429 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6430 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    6431 	 * because it can sleep, so call workqueue_wait() here.
    6432 	 */
   6434 	for (int i = 0; i < sc->sc_nqueues; i++)
   6435 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6436 }
   6437 
   6438 static void
   6439 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6440 {
   6441 	struct wm_softc *sc = ifp->if_softc;
   6442 	struct wm_txsoft *txs;
   6443 	int i, qidx;
   6444 
   6445 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6446 		device_xname(sc->sc_dev), __func__));
   6447 	KASSERT(WM_CORE_LOCKED(sc));
   6448 
   6449 	wm_set_stopping_flags(sc);
   6450 
   6451 	if (sc->sc_flags & WM_F_HAS_MII) {
   6452 		/* Down the MII. */
   6453 		mii_down(&sc->sc_mii);
   6454 	} else {
   6455 #if 0
   6456 		/* Should we clear PHY's status properly? */
   6457 		wm_reset(sc);
   6458 #endif
   6459 	}
   6460 
   6461 	/* Stop the transmit and receive processes. */
   6462 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6463 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6464 	sc->sc_rctl &= ~RCTL_EN;
   6465 
   6466 	/*
   6467 	 * Clear the interrupt mask to ensure the device cannot assert its
   6468 	 * interrupt line.
   6469 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6470 	 * service any currently pending or shared interrupt.
   6471 	 */
   6472 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6473 	sc->sc_icr = 0;
   6474 	if (wm_is_using_msix(sc)) {
   6475 		if (sc->sc_type != WM_T_82574) {
   6476 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6477 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6478 		} else
   6479 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6480 	}
   6481 
   6482 	/*
   6483 	 * Stop callouts after interrupts are disabled; if we have
   6484 	 * to wait for them, we will be releasing the CORE_LOCK
   6485 	 * briefly, which will unblock interrupts on the current CPU.
   6486 	 */
   6487 
   6488 	/* Stop the one second clock. */
   6489 	if (wait)
   6490 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6491 	else
   6492 		callout_stop(&sc->sc_tick_ch);
   6493 
   6494 	/* Stop the 82547 Tx FIFO stall check timer. */
   6495 	if (sc->sc_type == WM_T_82547) {
   6496 		if (wait)
   6497 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6498 		else
   6499 			callout_stop(&sc->sc_txfifo_ch);
   6500 	}
   6501 
   6502 	/* Release any queued transmit buffers. */
   6503 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6504 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6505 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6506 		mutex_enter(txq->txq_lock);
   6507 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6508 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6509 			txs = &txq->txq_soft[i];
   6510 			if (txs->txs_mbuf != NULL) {
    6511 				bus_dmamap_unload(sc->sc_dmat,
         				    txs->txs_dmamap);
   6512 				m_freem(txs->txs_mbuf);
   6513 				txs->txs_mbuf = NULL;
   6514 			}
   6515 		}
   6516 		mutex_exit(txq->txq_lock);
   6517 	}
   6518 
   6519 	/* Mark the interface as down and cancel the watchdog timer. */
   6520 	ifp->if_flags &= ~IFF_RUNNING;
   6521 
   6522 	if (disable) {
   6523 		for (i = 0; i < sc->sc_nqueues; i++) {
   6524 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6525 			mutex_enter(rxq->rxq_lock);
   6526 			wm_rxdrain(rxq);
   6527 			mutex_exit(rxq->rxq_lock);
   6528 		}
   6529 	}
   6530 
   6531 #if 0 /* notyet */
   6532 	if (sc->sc_type >= WM_T_82544)
   6533 		CSR_WRITE(sc, WMREG_WUC, 0);
   6534 #endif
   6535 }
   6536 
   6537 static void
   6538 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6539 {
   6540 	struct mbuf *m;
   6541 	int i;
   6542 
   6543 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6544 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6545 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6546 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6547 		    m->m_data, m->m_len, m->m_flags);
   6548 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6549 	    i, i == 1 ? "" : "s");
   6550 }
   6551 
   6552 /*
   6553  * wm_82547_txfifo_stall:
   6554  *
   6555  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6556  *	reset the FIFO pointers, and restart packet transmission.
   6557  */
   6558 static void
   6559 wm_82547_txfifo_stall(void *arg)
   6560 {
   6561 	struct wm_softc *sc = arg;
   6562 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6563 
   6564 	mutex_enter(txq->txq_lock);
   6565 
   6566 	if (txq->txq_stopping)
   6567 		goto out;
   6568 
   6569 	if (txq->txq_fifo_stall) {
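         		/*
         		 * The transmitter is idle once the descriptor ring
         		 * head and tail match and the internal Tx data FIFO
         		 * head/tail (and their saved copies) match as well;
         		 * only then is it safe to reset the FIFO pointers.
         		 */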
   6570 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6571 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6572 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6573 			/*
   6574 			 * Packets have drained.  Stop transmitter, reset
   6575 			 * FIFO pointers, restart transmitter, and kick
   6576 			 * the packet queue.
   6577 			 */
   6578 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6579 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6580 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6581 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6582 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6583 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6584 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6585 			CSR_WRITE_FLUSH(sc);
   6586 
   6587 			txq->txq_fifo_head = 0;
   6588 			txq->txq_fifo_stall = 0;
   6589 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6590 		} else {
   6591 			/*
   6592 			 * Still waiting for packets to drain; try again in
   6593 			 * another tick.
   6594 			 */
   6595 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6596 		}
   6597 	}
   6598 
   6599 out:
   6600 	mutex_exit(txq->txq_lock);
   6601 }
   6602 
   6603 /*
   6604  * wm_82547_txfifo_bugchk:
   6605  *
   6606  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6607  *	prevent enqueueing a packet that would wrap around the end
    6608  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6609  *
   6610  *	We do this by checking the amount of space before the end
   6611  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6612  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6613  *	the internal FIFO pointers to the beginning, and restart
   6614  *	transmission on the interface.
   6615  */
   6616 #define	WM_FIFO_HDR		0x10
   6617 #define	WM_82547_PAD_LEN	0x3e0
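         /*
          * Each packet occupies its length plus a 16-byte FIFO header,
          * rounded up to a 16-byte boundary, in the Tx FIFO (see the len
          * calculation below).  WM_82547_PAD_LEN is the extra slack
          * required before declaring a stall; both constants match the
          * values Linux's e1000 uses for the same erratum.
          */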
   6618 static int
   6619 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6620 {
   6621 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6622 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6623 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6624 
   6625 	/* Just return if already stalled. */
   6626 	if (txq->txq_fifo_stall)
   6627 		return 1;
   6628 
   6629 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6630 		/* Stall only occurs in half-duplex mode. */
   6631 		goto send_packet;
   6632 	}
   6633 
   6634 	if (len >= WM_82547_PAD_LEN + space) {
   6635 		txq->txq_fifo_stall = 1;
   6636 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6637 		return 1;
   6638 	}
   6639 
   6640  send_packet:
   6641 	txq->txq_fifo_head += len;
   6642 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6643 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6644 
   6645 	return 0;
   6646 }
   6647 
   6648 static int
   6649 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6650 {
   6651 	int error;
   6652 
   6653 	/*
   6654 	 * Allocate the control data structures, and create and load the
   6655 	 * DMA map for it.
   6656 	 *
   6657 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6658 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6659 	 * both sets within the same 4G segment.
   6660 	 */
   6661 	if (sc->sc_type < WM_T_82544)
   6662 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6663 	else
   6664 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6665 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6666 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6667 	else
   6668 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6669 
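         	/*
         	 * The bus_dmamem_alloc() below asks for PAGE_SIZE alignment
         	 * and a 4GB boundary, so the ring never crosses a 4GB
         	 * address boundary (TDBAH holds a single high address word
         	 * for the whole ring).
         	 */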
   6670 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6671 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6672 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6673 		aprint_error_dev(sc->sc_dev,
   6674 		    "unable to allocate TX control data, error = %d\n",
   6675 		    error);
   6676 		goto fail_0;
   6677 	}
   6678 
   6679 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6680 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6681 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6682 		aprint_error_dev(sc->sc_dev,
   6683 		    "unable to map TX control data, error = %d\n", error);
   6684 		goto fail_1;
   6685 	}
   6686 
   6687 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6688 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6689 		aprint_error_dev(sc->sc_dev,
   6690 		    "unable to create TX control data DMA map, error = %d\n",
   6691 		    error);
   6692 		goto fail_2;
   6693 	}
   6694 
   6695 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6696 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6697 		aprint_error_dev(sc->sc_dev,
   6698 		    "unable to load TX control data DMA map, error = %d\n",
   6699 		    error);
   6700 		goto fail_3;
   6701 	}
   6702 
   6703 	return 0;
   6704 
   6705  fail_3:
   6706 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6707  fail_2:
   6708 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6709 	    WM_TXDESCS_SIZE(txq));
   6710  fail_1:
   6711 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6712  fail_0:
   6713 	return error;
   6714 }
   6715 
   6716 static void
   6717 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6718 {
   6719 
   6720 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6721 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6722 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6723 	    WM_TXDESCS_SIZE(txq));
   6724 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6725 }
   6726 
   6727 static int
   6728 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6729 {
   6730 	int error;
   6731 	size_t rxq_descs_size;
   6732 
   6733 	/*
   6734 	 * Allocate the control data structures, and create and load the
   6735 	 * DMA map for it.
   6736 	 *
   6737 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6738 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6739 	 * both sets within the same 4G segment.
   6740 	 */
   6741 	rxq->rxq_ndesc = WM_NRXDESC;
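         	/*
         	 * Descriptor format: the 82574 uses extended descriptors,
         	 * NEWQUEUE (82575 and later) chips use the advanced ("nq")
         	 * format, and older chips use the legacy wiseman format.
         	 */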
   6742 	if (sc->sc_type == WM_T_82574)
   6743 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6744 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6745 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6746 	else
   6747 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6748 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6749 
   6750 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6751 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6752 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6753 		aprint_error_dev(sc->sc_dev,
   6754 		    "unable to allocate RX control data, error = %d\n",
   6755 		    error);
   6756 		goto fail_0;
   6757 	}
   6758 
   6759 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6760 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6761 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6762 		aprint_error_dev(sc->sc_dev,
   6763 		    "unable to map RX control data, error = %d\n", error);
   6764 		goto fail_1;
   6765 	}
   6766 
   6767 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6768 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6769 		aprint_error_dev(sc->sc_dev,
   6770 		    "unable to create RX control data DMA map, error = %d\n",
   6771 		    error);
   6772 		goto fail_2;
   6773 	}
   6774 
   6775 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6776 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6777 		aprint_error_dev(sc->sc_dev,
   6778 		    "unable to load RX control data DMA map, error = %d\n",
   6779 		    error);
   6780 		goto fail_3;
   6781 	}
   6782 
   6783 	return 0;
   6784 
   6785  fail_3:
   6786 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6787  fail_2:
   6788 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6789 	    rxq_descs_size);
   6790  fail_1:
   6791 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6792  fail_0:
   6793 	return error;
   6794 }
   6795 
   6796 static void
   6797 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6798 {
   6799 
   6800 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6801 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6802 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6803 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6804 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6805 }
   6806 
   6807 
   6808 static int
   6809 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6810 {
   6811 	int i, error;
   6812 
   6813 	/* Create the transmit buffer DMA maps. */
   6814 	WM_TXQUEUELEN(txq) =
   6815 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6816 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6817 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6818 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6819 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6820 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6821 			aprint_error_dev(sc->sc_dev,
   6822 			    "unable to create Tx DMA map %d, error = %d\n",
   6823 			    i, error);
   6824 			goto fail;
   6825 		}
   6826 	}
   6827 
   6828 	return 0;
   6829 
   6830  fail:
   6831 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6832 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6833 			bus_dmamap_destroy(sc->sc_dmat,
   6834 			    txq->txq_soft[i].txs_dmamap);
   6835 	}
   6836 	return error;
   6837 }
   6838 
   6839 static void
   6840 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6841 {
   6842 	int i;
   6843 
   6844 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6845 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6846 			bus_dmamap_destroy(sc->sc_dmat,
   6847 			    txq->txq_soft[i].txs_dmamap);
   6848 	}
   6849 }
   6850 
   6851 static int
   6852 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6853 {
   6854 	int i, error;
   6855 
   6856 	/* Create the receive buffer DMA maps. */
   6857 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6858 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6859 			    MCLBYTES, 0, 0,
   6860 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6861 			aprint_error_dev(sc->sc_dev,
   6862 			    "unable to create Rx DMA map %d error = %d\n",
   6863 			    i, error);
   6864 			goto fail;
   6865 		}
   6866 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6867 	}
   6868 
   6869 	return 0;
   6870 
   6871  fail:
   6872 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6873 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6874 			bus_dmamap_destroy(sc->sc_dmat,
   6875 			    rxq->rxq_soft[i].rxs_dmamap);
   6876 	}
   6877 	return error;
   6878 }
   6879 
   6880 static void
   6881 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6882 {
   6883 	int i;
   6884 
   6885 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6886 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6887 			bus_dmamap_destroy(sc->sc_dmat,
   6888 			    rxq->rxq_soft[i].rxs_dmamap);
   6889 	}
   6890 }
   6891 
   6892 /*
    6893  * wm_alloc_txrx_queues:
    6894  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6895  */
   6896 static int
   6897 wm_alloc_txrx_queues(struct wm_softc *sc)
   6898 {
   6899 	int i, error, tx_done, rx_done;
   6900 
   6901 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6902 	    KM_SLEEP);
   6903 	if (sc->sc_queue == NULL) {
   6904 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6905 		error = ENOMEM;
   6906 		goto fail_0;
   6907 	}
   6908 
   6909 	/* For transmission */
   6910 	error = 0;
   6911 	tx_done = 0;
   6912 	for (i = 0; i < sc->sc_nqueues; i++) {
   6913 #ifdef WM_EVENT_COUNTERS
   6914 		int j;
   6915 		const char *xname;
   6916 #endif
   6917 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6918 		txq->txq_sc = sc;
   6919 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6920 
   6921 		error = wm_alloc_tx_descs(sc, txq);
   6922 		if (error)
   6923 			break;
   6924 		error = wm_alloc_tx_buffer(sc, txq);
   6925 		if (error) {
   6926 			wm_free_tx_descs(sc, txq);
   6927 			break;
   6928 		}
   6929 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6930 		if (txq->txq_interq == NULL) {
   6931 			wm_free_tx_descs(sc, txq);
   6932 			wm_free_tx_buffer(sc, txq);
   6933 			error = ENOMEM;
   6934 			break;
   6935 		}
   6936 
   6937 #ifdef WM_EVENT_COUNTERS
   6938 		xname = device_xname(sc->sc_dev);
   6939 
   6940 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6941 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6942 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6943 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6944 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6945 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6946 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6947 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6948 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6949 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6950 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6951 
   6952 		for (j = 0; j < WM_NTXSEGS; j++) {
    6953 			snprintf(txq->txq_txseg_evcnt_names[j],
    6954 			    sizeof(txq->txq_txseg_evcnt_names[j]),
    6955 			    "txq%02dtxseg%d", i, j);
    6956 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   6957 		}
   6958 
   6959 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6960 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6961 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6962 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6963 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6964 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   6965 #endif /* WM_EVENT_COUNTERS */
   6966 
   6967 		tx_done++;
   6968 	}
   6969 	if (error)
   6970 		goto fail_1;
   6971 
   6972 	/* For receive */
   6973 	error = 0;
   6974 	rx_done = 0;
   6975 	for (i = 0; i < sc->sc_nqueues; i++) {
   6976 #ifdef WM_EVENT_COUNTERS
   6977 		const char *xname;
   6978 #endif
   6979 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6980 		rxq->rxq_sc = sc;
   6981 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6982 
   6983 		error = wm_alloc_rx_descs(sc, rxq);
   6984 		if (error)
   6985 			break;
   6986 
   6987 		error = wm_alloc_rx_buffer(sc, rxq);
   6988 		if (error) {
   6989 			wm_free_rx_descs(sc, rxq);
   6990 			break;
   6991 		}
   6992 
   6993 #ifdef WM_EVENT_COUNTERS
   6994 		xname = device_xname(sc->sc_dev);
   6995 
   6996 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6997 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6998 
   6999 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7000 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7001 #endif /* WM_EVENT_COUNTERS */
   7002 
   7003 		rx_done++;
   7004 	}
   7005 	if (error)
   7006 		goto fail_2;
   7007 
   7008 	return 0;
   7009 
   7010  fail_2:
   7011 	for (i = 0; i < rx_done; i++) {
   7012 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7013 		wm_free_rx_buffer(sc, rxq);
   7014 		wm_free_rx_descs(sc, rxq);
   7015 		if (rxq->rxq_lock)
   7016 			mutex_obj_free(rxq->rxq_lock);
   7017 	}
   7018  fail_1:
   7019 	for (i = 0; i < tx_done; i++) {
   7020 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7021 		pcq_destroy(txq->txq_interq);
   7022 		wm_free_tx_buffer(sc, txq);
   7023 		wm_free_tx_descs(sc, txq);
   7024 		if (txq->txq_lock)
   7025 			mutex_obj_free(txq->txq_lock);
   7026 	}
   7027 
   7028 	kmem_free(sc->sc_queue,
   7029 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7030  fail_0:
   7031 	return error;
   7032 }
   7033 
   7034 /*
    7035  * wm_free_txrx_queues:
    7036  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7037  */
   7038 static void
   7039 wm_free_txrx_queues(struct wm_softc *sc)
   7040 {
   7041 	int i;
   7042 
   7043 	for (i = 0; i < sc->sc_nqueues; i++) {
   7044 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7045 
   7046 #ifdef WM_EVENT_COUNTERS
   7047 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7048 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7049 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7050 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7051 #endif /* WM_EVENT_COUNTERS */
   7052 
   7053 		wm_free_rx_buffer(sc, rxq);
   7054 		wm_free_rx_descs(sc, rxq);
   7055 		if (rxq->rxq_lock)
   7056 			mutex_obj_free(rxq->rxq_lock);
   7057 	}
   7058 
   7059 	for (i = 0; i < sc->sc_nqueues; i++) {
   7060 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7061 		struct mbuf *m;
   7062 #ifdef WM_EVENT_COUNTERS
   7063 		int j;
   7064 
   7065 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7066 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7067 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7068 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7069 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7070 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7071 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7072 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7073 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7074 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7075 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7076 
   7077 		for (j = 0; j < WM_NTXSEGS; j++)
   7078 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7079 
   7080 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7081 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7082 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7083 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7084 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7085 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7086 #endif /* WM_EVENT_COUNTERS */
   7087 
   7088 		/* Drain txq_interq */
   7089 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7090 			m_freem(m);
   7091 		pcq_destroy(txq->txq_interq);
   7092 
   7093 		wm_free_tx_buffer(sc, txq);
   7094 		wm_free_tx_descs(sc, txq);
   7095 		if (txq->txq_lock)
   7096 			mutex_obj_free(txq->txq_lock);
   7097 	}
   7098 
   7099 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7100 }
   7101 
   7102 static void
   7103 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7104 {
   7105 
   7106 	KASSERT(mutex_owned(txq->txq_lock));
   7107 
   7108 	/* Initialize the transmit descriptor ring. */
   7109 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7110 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7111 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7112 	txq->txq_free = WM_NTXDESC(txq);
   7113 	txq->txq_next = 0;
   7114 }
   7115 
   7116 static void
   7117 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7118     struct wm_txqueue *txq)
   7119 {
   7120 
   7121 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7122 		device_xname(sc->sc_dev), __func__));
   7123 	KASSERT(mutex_owned(txq->txq_lock));
   7124 
   7125 	if (sc->sc_type < WM_T_82543) {
   7126 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7127 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7128 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7129 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7130 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7131 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7132 	} else {
   7133 		int qid = wmq->wmq_id;
   7134 
   7135 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7136 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7137 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7138 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7139 
   7140 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7141 			/*
   7142 			 * Don't write TDT before TCTL.EN is set.
    7143 			 * See the datasheet.
   7144 			 */
   7145 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7146 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7147 			    | TXDCTL_WTHRESH(0));
   7148 		else {
   7149 			/* XXX should update with AIM? */
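         			/*
         			 * wmq_itr is kept in ITR units (presumably 256ns
         			 * each); TIDV/TADV count in 1.024us units, hence
         			 * the division by four.
         			 */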
   7150 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7151 			if (sc->sc_type >= WM_T_82540) {
   7152 				/* Should be the same */
   7153 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7154 			}
   7155 
   7156 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7157 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7158 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7159 		}
   7160 	}
   7161 }
   7162 
   7163 static void
   7164 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7165 {
   7166 	int i;
   7167 
   7168 	KASSERT(mutex_owned(txq->txq_lock));
   7169 
   7170 	/* Initialize the transmit job descriptors. */
   7171 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7172 		txq->txq_soft[i].txs_mbuf = NULL;
   7173 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7174 	txq->txq_snext = 0;
   7175 	txq->txq_sdirty = 0;
   7176 }
   7177 
   7178 static void
   7179 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7180     struct wm_txqueue *txq)
   7181 {
   7182 
   7183 	KASSERT(mutex_owned(txq->txq_lock));
   7184 
   7185 	/*
   7186 	 * Set up some register offsets that are different between
   7187 	 * the i82542 and the i82543 and later chips.
   7188 	 */
   7189 	if (sc->sc_type < WM_T_82543)
   7190 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7191 	else
   7192 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7193 
   7194 	wm_init_tx_descs(sc, txq);
   7195 	wm_init_tx_regs(sc, wmq, txq);
   7196 	wm_init_tx_buffer(sc, txq);
   7197 
   7198 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7199 	txq->txq_sending = false;
   7200 }
   7201 
   7202 static void
   7203 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7204     struct wm_rxqueue *rxq)
   7205 {
   7206 
   7207 	KASSERT(mutex_owned(rxq->rxq_lock));
   7208 
   7209 	/*
   7210 	 * Initialize the receive descriptor and receive job
   7211 	 * descriptor rings.
   7212 	 */
   7213 	if (sc->sc_type < WM_T_82543) {
   7214 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7215 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7216 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7217 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7218 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7219 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7220 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7221 
   7222 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7223 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7224 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7225 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7226 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7227 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7228 	} else {
   7229 		int qid = wmq->wmq_id;
   7230 
   7231 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7232 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7233 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7234 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7235 
   7236 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7237 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7238 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher", __func__, MCLBYTES);
    7239 
    7240 			/* Currently, support only SRRCTL_DESCTYPE_ADV_ONEBUF. */
    7241 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
    7242 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
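         			/*
         			 * RXDCTL below sets the prefetch (PTHRESH), host
         			 * (HTHRESH) and write-back (WTHRESH) descriptor
         			 * thresholds and enables the queue.
         			 */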
   7243 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7244 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7245 			    | RXDCTL_WTHRESH(1));
   7246 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7247 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7248 		} else {
   7249 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7250 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7251 			/* XXX should update with AIM? */
   7252 			CSR_WRITE(sc, WMREG_RDTR,
   7253 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7254 			/* MUST be the same */
   7255 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7256 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7257 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7258 		}
   7259 	}
   7260 }
   7261 
   7262 static int
   7263 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7264 {
   7265 	struct wm_rxsoft *rxs;
   7266 	int error, i;
   7267 
   7268 	KASSERT(mutex_owned(rxq->rxq_lock));
   7269 
   7270 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7271 		rxs = &rxq->rxq_soft[i];
   7272 		if (rxs->rxs_mbuf == NULL) {
   7273 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7274 				log(LOG_ERR, "%s: unable to allocate or map "
   7275 				    "rx buffer %d, error = %d\n",
   7276 				    device_xname(sc->sc_dev), i, error);
   7277 				/*
   7278 				 * XXX Should attempt to run with fewer receive
   7279 				 * XXX buffers instead of just failing.
   7280 				 */
   7281 				wm_rxdrain(rxq);
   7282 				return ENOMEM;
   7283 			}
   7284 		} else {
   7285 			/*
   7286 			 * For 82575 and 82576, the RX descriptors must be
   7287 			 * initialized after the setting of RCTL.EN in
   7288 			 * wm_set_filter()
   7289 			 */
   7290 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7291 				wm_init_rxdesc(rxq, i);
   7292 		}
   7293 	}
   7294 	rxq->rxq_ptr = 0;
   7295 	rxq->rxq_discard = 0;
   7296 	WM_RXCHAIN_RESET(rxq);
   7297 
   7298 	return 0;
   7299 }
   7300 
   7301 static int
   7302 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7303     struct wm_rxqueue *rxq)
   7304 {
   7305 
   7306 	KASSERT(mutex_owned(rxq->rxq_lock));
   7307 
   7308 	/*
   7309 	 * Set up some register offsets that are different between
   7310 	 * the i82542 and the i82543 and later chips.
   7311 	 */
   7312 	if (sc->sc_type < WM_T_82543)
   7313 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7314 	else
   7315 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7316 
   7317 	wm_init_rx_regs(sc, wmq, rxq);
   7318 	return wm_init_rx_buffer(sc, rxq);
   7319 }
   7320 
   7321 /*
    7322  * wm_init_txrx_queues:
    7323  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7324  */
   7325 static int
   7326 wm_init_txrx_queues(struct wm_softc *sc)
   7327 {
   7328 	int i, error = 0;
   7329 
   7330 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7331 		device_xname(sc->sc_dev), __func__));
   7332 
   7333 	for (i = 0; i < sc->sc_nqueues; i++) {
   7334 		struct wm_queue *wmq = &sc->sc_queue[i];
   7335 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7336 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7337 
   7338 		/*
   7339 		 * TODO
    7340 		 * Currently, we use a constant value instead of AIM
    7341 		 * (adaptive interrupt moderation). Furthermore, the
    7342 		 * interrupt interval in the multiqueue (polling mode)
    7343 		 * case is lower than the default value. More tuning and
         		 * AIM support are required.
   7344 		 */
   7345 		if (wm_is_using_multiqueue(sc))
   7346 			wmq->wmq_itr = 50;
   7347 		else
   7348 			wmq->wmq_itr = sc->sc_itr_init;
   7349 		wmq->wmq_set_itr = true;
   7350 
   7351 		mutex_enter(txq->txq_lock);
   7352 		wm_init_tx_queue(sc, wmq, txq);
   7353 		mutex_exit(txq->txq_lock);
   7354 
   7355 		mutex_enter(rxq->rxq_lock);
   7356 		error = wm_init_rx_queue(sc, wmq, rxq);
   7357 		mutex_exit(rxq->rxq_lock);
   7358 		if (error)
   7359 			break;
   7360 	}
   7361 
   7362 	return error;
   7363 }
   7364 
   7365 /*
   7366  * wm_tx_offload:
   7367  *
   7368  *	Set up TCP/IP checksumming parameters for the
   7369  *	specified packet.
   7370  */
   7371 static void
   7372 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7373     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7374 {
   7375 	struct mbuf *m0 = txs->txs_mbuf;
   7376 	struct livengood_tcpip_ctxdesc *t;
   7377 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7378 	uint32_t ipcse;
   7379 	struct ether_header *eh;
   7380 	int offset, iphl;
   7381 	uint8_t fields;
   7382 
   7383 	/*
   7384 	 * XXX It would be nice if the mbuf pkthdr had offset
   7385 	 * fields for the protocol headers.
   7386 	 */
   7387 
   7388 	eh = mtod(m0, struct ether_header *);
    7389 	switch (ntohs(eh->ether_type)) {
   7390 	case ETHERTYPE_IP:
   7391 	case ETHERTYPE_IPV6:
   7392 		offset = ETHER_HDR_LEN;
   7393 		break;
   7394 
   7395 	case ETHERTYPE_VLAN:
   7396 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7397 		break;
   7398 
   7399 	default:
   7400 		/* Don't support this protocol or encapsulation. */
    7401 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7402 		txq->txq_last_hw_ipcs = 0;
    7403 		txq->txq_last_hw_tucs = 0;
   7404 		*fieldsp = 0;
   7405 		*cmdp = 0;
   7406 		return;
   7407 	}
   7408 
   7409 	if ((m0->m_pkthdr.csum_flags &
   7410 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7411 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7412 	} else
   7413 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7414 
   7415 	ipcse = offset + iphl - 1;
   7416 
   7417 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7418 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7419 	seg = 0;
   7420 	fields = 0;
   7421 
   7422 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7423 		int hlen = offset + iphl;
   7424 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7425 
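         		/*
         		 * For TSO the controller expects ip_len (or ip6_plen)
         		 * zeroed and th_sum seeded with a pseudo-header
         		 * checksum computed without the length; both branches
         		 * below arrange exactly that before handing the
         		 * headers to the chip.
         		 */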
   7426 		if (__predict_false(m0->m_len <
   7427 				    (hlen + sizeof(struct tcphdr)))) {
   7428 			/*
   7429 			 * TCP/IP headers are not in the first mbuf; we need
   7430 			 * to do this the slow and painful way. Let's just
   7431 			 * hope this doesn't happen very often.
   7432 			 */
   7433 			struct tcphdr th;
   7434 
   7435 			WM_Q_EVCNT_INCR(txq, tsopain);
   7436 
   7437 			m_copydata(m0, hlen, sizeof(th), &th);
   7438 			if (v4) {
   7439 				struct ip ip;
   7440 
   7441 				m_copydata(m0, offset, sizeof(ip), &ip);
   7442 				ip.ip_len = 0;
   7443 				m_copyback(m0,
   7444 				    offset + offsetof(struct ip, ip_len),
   7445 				    sizeof(ip.ip_len), &ip.ip_len);
   7446 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7447 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7448 			} else {
   7449 				struct ip6_hdr ip6;
   7450 
   7451 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7452 				ip6.ip6_plen = 0;
   7453 				m_copyback(m0,
   7454 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7455 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7456 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7457 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7458 			}
   7459 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7460 			    sizeof(th.th_sum), &th.th_sum);
   7461 
   7462 			hlen += th.th_off << 2;
   7463 		} else {
   7464 			/*
   7465 			 * TCP/IP headers are in the first mbuf; we can do
   7466 			 * this the easy way.
   7467 			 */
   7468 			struct tcphdr *th;
   7469 
   7470 			if (v4) {
   7471 				struct ip *ip =
   7472 				    (void *)(mtod(m0, char *) + offset);
   7473 				th = (void *)(mtod(m0, char *) + hlen);
   7474 
   7475 				ip->ip_len = 0;
   7476 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7477 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7478 			} else {
   7479 				struct ip6_hdr *ip6 =
   7480 				    (void *)(mtod(m0, char *) + offset);
   7481 				th = (void *)(mtod(m0, char *) + hlen);
   7482 
   7483 				ip6->ip6_plen = 0;
   7484 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7485 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7486 			}
   7487 			hlen += th->th_off << 2;
   7488 		}
   7489 
   7490 		if (v4) {
   7491 			WM_Q_EVCNT_INCR(txq, tso);
   7492 			cmdlen |= WTX_TCPIP_CMD_IP;
   7493 		} else {
   7494 			WM_Q_EVCNT_INCR(txq, tso6);
   7495 			ipcse = 0;
   7496 		}
   7497 		cmd |= WTX_TCPIP_CMD_TSE;
   7498 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7499 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7500 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7501 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7502 	}
   7503 
   7504 	/*
   7505 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7506 	 * offload feature, if we load the context descriptor, we
   7507 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7508 	 */
   7509 
   7510 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7511 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7512 	    WTX_TCPIP_IPCSE(ipcse);
   7513 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7514 		WM_Q_EVCNT_INCR(txq, ipsum);
   7515 		fields |= WTX_IXSM;
   7516 	}
   7517 
   7518 	offset += iphl;
   7519 
   7520 	if (m0->m_pkthdr.csum_flags &
   7521 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7522 		WM_Q_EVCNT_INCR(txq, tusum);
   7523 		fields |= WTX_TXSM;
   7524 		tucs = WTX_TCPIP_TUCSS(offset) |
   7525 		    WTX_TCPIP_TUCSO(offset +
   7526 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7527 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7528 	} else if ((m0->m_pkthdr.csum_flags &
   7529 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7530 		WM_Q_EVCNT_INCR(txq, tusum6);
   7531 		fields |= WTX_TXSM;
   7532 		tucs = WTX_TCPIP_TUCSS(offset) |
   7533 		    WTX_TCPIP_TUCSO(offset +
   7534 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7535 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7536 	} else {
   7537 		/* Just initialize it to a valid TCP context. */
   7538 		tucs = WTX_TCPIP_TUCSS(offset) |
   7539 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7540 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7541 	}
   7542 
   7543 	*cmdp = cmd;
   7544 	*fieldsp = fields;
   7545 
   7546 	/*
    7547 	 * We don't have to write a context descriptor for every packet,
    7548 	 * except on the 82574. For the 82574, we must write a context
    7549 	 * descriptor for every packet when we use two descriptor queues.
    7550 	 *
    7551 	 * The 82574L can only remember the *last* context used
    7552 	 * regardless of the queue it was used for.  We cannot reuse
   7553 	 * contexts on this hardware platform and must generate a new
   7554 	 * context every time.  82574L hardware spec, section 7.2.6,
   7555 	 * second note.
   7556 	 */
   7557 	if (sc->sc_nqueues < 2) {
   7558 		/*
    7559 		 * Setting up a new checksum offload context for every
    7560 		 * frame takes a lot of processing time for the hardware.
    7561 		 * This also reduces performance a lot for small sized
    7562 		 * frames, so avoid it if the driver can use a previously
    7563 		 * configured checksum offload context.
    7564 		 * For TSO, in theory we could reuse the same TSO context
    7565 		 * if the frame is the same type (IP/TCP) and has the same
    7566 		 * MSS. However, checking whether a frame has the same
    7567 		 * IP/TCP structure is hard, so just ignore that and always
    7568 		 * establish a new TSO context.
    7569 		 */
   7571 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7572 		    == 0) {
   7573 			if (txq->txq_last_hw_cmd == cmd &&
   7574 			    txq->txq_last_hw_fields == fields &&
   7575 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7576 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7577 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7578 				return;
   7579 			}
   7580 		}
   7581 
    7582 		txq->txq_last_hw_cmd = cmd;
    7583 		txq->txq_last_hw_fields = fields;
    7584 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7585 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7586 	}
   7587 
   7588 	/* Fill in the context descriptor. */
   7589 	t = (struct livengood_tcpip_ctxdesc *)
   7590 	    &txq->txq_descs[txq->txq_next];
   7591 	t->tcpip_ipcs = htole32(ipcs);
   7592 	t->tcpip_tucs = htole32(tucs);
   7593 	t->tcpip_cmdlen = htole32(cmdlen);
   7594 	t->tcpip_seg = htole32(seg);
   7595 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7596 
   7597 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7598 	txs->txs_ndesc++;
   7599 }
   7600 
   7601 static inline int
   7602 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7603 {
   7604 	struct wm_softc *sc = ifp->if_softc;
   7605 	u_int cpuid = cpu_index(curcpu());
   7606 
   7607 	/*
    7608 	 * Currently, a simple distribution strategy.
    7609 	 * TODO:
    7610 	 * Distribute by flowid (RSS hash value).
   7611 	 */
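         	/*
         	 * For example (hypothetical values): with ncpu = 8, four
         	 * queues and sc_affinity_offset = 2, CPU 3 is mapped to
         	 * ((3 + 8 - 2) % 8) % 4 = queue 1.
         	 */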
   7612 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7613 }
   7614 
   7615 /*
   7616  * wm_start:		[ifnet interface function]
   7617  *
   7618  *	Start packet transmission on the interface.
   7619  */
   7620 static void
   7621 wm_start(struct ifnet *ifp)
   7622 {
   7623 	struct wm_softc *sc = ifp->if_softc;
   7624 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7625 
   7626 #ifdef WM_MPSAFE
   7627 	KASSERT(if_is_mpsafe(ifp));
   7628 #endif
   7629 	/*
   7630 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7631 	 */
   7632 
   7633 	mutex_enter(txq->txq_lock);
   7634 	if (!txq->txq_stopping)
   7635 		wm_start_locked(ifp);
   7636 	mutex_exit(txq->txq_lock);
   7637 }
   7638 
   7639 static void
   7640 wm_start_locked(struct ifnet *ifp)
   7641 {
   7642 	struct wm_softc *sc = ifp->if_softc;
   7643 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7644 
   7645 	wm_send_common_locked(ifp, txq, false);
   7646 }
   7647 
   7648 static int
   7649 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7650 {
   7651 	int qid;
   7652 	struct wm_softc *sc = ifp->if_softc;
   7653 	struct wm_txqueue *txq;
   7654 
   7655 	qid = wm_select_txqueue(ifp, m);
   7656 	txq = &sc->sc_queue[qid].wmq_txq;
   7657 
   7658 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7659 		m_freem(m);
   7660 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7661 		return ENOBUFS;
   7662 	}
   7663 
   7664 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7665 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7666 	if (m->m_flags & M_MCAST)
   7667 		if_statinc_ref(nsr, if_omcasts);
   7668 	IF_STAT_PUTREF(ifp);
   7669 
   7670 	if (mutex_tryenter(txq->txq_lock)) {
   7671 		if (!txq->txq_stopping)
   7672 			wm_transmit_locked(ifp, txq);
   7673 		mutex_exit(txq->txq_lock);
   7674 	}
   7675 
   7676 	return 0;
   7677 }
   7678 
   7679 static void
   7680 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7681 {
   7682 
   7683 	wm_send_common_locked(ifp, txq, true);
   7684 }
   7685 
   7686 static void
   7687 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7688     bool is_transmit)
   7689 {
   7690 	struct wm_softc *sc = ifp->if_softc;
   7691 	struct mbuf *m0;
   7692 	struct wm_txsoft *txs;
   7693 	bus_dmamap_t dmamap;
   7694 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7695 	bus_addr_t curaddr;
   7696 	bus_size_t seglen, curlen;
   7697 	uint32_t cksumcmd;
   7698 	uint8_t cksumfields;
   7699 	bool remap = true;
   7700 
   7701 	KASSERT(mutex_owned(txq->txq_lock));
   7702 
   7703 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7704 		return;
   7705 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7706 		return;
   7707 
   7708 	/* Remember the previous number of free descriptors. */
   7709 	ofree = txq->txq_free;
   7710 
   7711 	/*
   7712 	 * Loop through the send queue, setting up transmit descriptors
   7713 	 * until we drain the queue, or use up all available transmit
   7714 	 * descriptors.
   7715 	 */
   7716 	for (;;) {
   7717 		m0 = NULL;
   7718 
   7719 		/* Get a work queue entry. */
   7720 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7721 			wm_txeof(txq, UINT_MAX);
   7722 			if (txq->txq_sfree == 0) {
   7723 				DPRINTF(WM_DEBUG_TX,
   7724 				    ("%s: TX: no free job descriptors\n",
   7725 					device_xname(sc->sc_dev)));
   7726 				WM_Q_EVCNT_INCR(txq, txsstall);
   7727 				break;
   7728 			}
   7729 		}
   7730 
   7731 		/* Grab a packet off the queue. */
   7732 		if (is_transmit)
   7733 			m0 = pcq_get(txq->txq_interq);
   7734 		else
   7735 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7736 		if (m0 == NULL)
   7737 			break;
   7738 
   7739 		DPRINTF(WM_DEBUG_TX,
   7740 		    ("%s: TX: have packet to transmit: %p\n",
   7741 			device_xname(sc->sc_dev), m0));
   7742 
   7743 		txs = &txq->txq_soft[txq->txq_snext];
   7744 		dmamap = txs->txs_dmamap;
   7745 
   7746 		use_tso = (m0->m_pkthdr.csum_flags &
   7747 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7748 
   7749 		/*
   7750 		 * So says the Linux driver:
   7751 		 * The controller does a simple calculation to make sure
   7752 		 * there is enough room in the FIFO before initiating the
   7753 		 * DMA for each buffer. The calc is:
   7754 		 *	4 = ceil(buffer len / MSS)
   7755 		 * To make sure we don't overrun the FIFO, adjust the max
   7756 		 * buffer len if the MSS drops.
   7757 		 */
   7758 		dmamap->dm_maxsegsz =
   7759 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7760 		    ? m0->m_pkthdr.segsz << 2
   7761 		    : WTX_MAX_LEN;
   7762 
   7763 		/*
   7764 		 * Load the DMA map.  If this fails, the packet either
   7765 		 * didn't fit in the allotted number of segments, or we
   7766 		 * were short on resources.  For the too-many-segments
   7767 		 * case, we simply report an error and drop the packet,
   7768 		 * since we can't sanely copy a jumbo packet to a single
   7769 		 * buffer.
   7770 		 */
   7771 retry:
   7772 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7773 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7774 		if (__predict_false(error)) {
   7775 			if (error == EFBIG) {
   7776 				if (remap == true) {
   7777 					struct mbuf *m;
   7778 
   7779 					remap = false;
   7780 					m = m_defrag(m0, M_NOWAIT);
   7781 					if (m != NULL) {
   7782 						WM_Q_EVCNT_INCR(txq, defrag);
   7783 						m0 = m;
   7784 						goto retry;
   7785 					}
   7786 				}
   7787 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7788 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7789 				    "DMA segments, dropping...\n",
   7790 				    device_xname(sc->sc_dev));
   7791 				wm_dump_mbuf_chain(sc, m0);
   7792 				m_freem(m0);
   7793 				continue;
   7794 			}
   7795 			/* Short on resources, just stop for now. */
   7796 			DPRINTF(WM_DEBUG_TX,
   7797 			    ("%s: TX: dmamap load failed: %d\n",
   7798 				device_xname(sc->sc_dev), error));
   7799 			break;
   7800 		}
   7801 
   7802 		segs_needed = dmamap->dm_nsegs;
   7803 		if (use_tso) {
   7804 			/* For sentinel descriptor; see below. */
   7805 			segs_needed++;
   7806 		}
   7807 
   7808 		/*
   7809 		 * Ensure we have enough descriptors free to describe
   7810 		 * the packet. Note, we always reserve one descriptor
   7811 		 * at the end of the ring due to the semantics of the
   7812 		 * TDT register, plus one more in the event we need
   7813 		 * to load offload context.
   7814 		 */
   7815 		if (segs_needed > txq->txq_free - 2) {
   7816 			/*
   7817 			 * Not enough free descriptors to transmit this
   7818 			 * packet.  We haven't committed anything yet,
   7819 			 * so just unload the DMA map, put the packet
   7820 			 * back on the queue, and punt. Notify the upper
   7821 			 * layer that there are no more slots left.
   7822 			 */
   7823 			DPRINTF(WM_DEBUG_TX,
   7824 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7825 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7826 				segs_needed, txq->txq_free - 1));
   7827 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7828 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7829 			WM_Q_EVCNT_INCR(txq, txdstall);
   7830 			break;
   7831 		}
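        		/*
        		 * Accounting example (illustrative): with txq_free of
        		 * 10, a packet needing 9 descriptors is refused above,
        		 * since 9 > 10 - 2: one slot is reserved for the TDT
        		 * semantics and one for a possible offload context.
        		 */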
   7832 
   7833 		/*
   7834 		 * Check for 82547 Tx FIFO bug. We need to do this
   7835 		 * once we know we can transmit the packet, since we
   7836 		 * do some internal FIFO space accounting here.
   7837 		 */
   7838 		if (sc->sc_type == WM_T_82547 &&
   7839 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7840 			DPRINTF(WM_DEBUG_TX,
   7841 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7842 				device_xname(sc->sc_dev)));
   7843 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7844 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7845 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7846 			break;
   7847 		}
   7848 
   7849 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7850 
   7851 		DPRINTF(WM_DEBUG_TX,
   7852 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7853 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7854 
   7855 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7856 
   7857 		/*
   7858 		 * Store a pointer to the packet so that we can free it
   7859 		 * later.
   7860 		 *
   7861 		 * Initially, we consider the number of descriptors the
   7862 		 * packet uses the number of DMA segments.  This may be
   7863 		 * incremented by 1 if we do checksum offload (a descriptor
   7864 		 * is used to set the checksum context).
   7865 		 */
   7866 		txs->txs_mbuf = m0;
   7867 		txs->txs_firstdesc = txq->txq_next;
   7868 		txs->txs_ndesc = segs_needed;
   7869 
   7870 		/* Set up offload parameters for this packet. */
   7871 		if (m0->m_pkthdr.csum_flags &
   7872 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7873 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7874 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7875 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   7876 		} else {
   7877 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7878 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   7879 			cksumcmd = 0;
   7880 			cksumfields = 0;
   7881 		}
   7882 
   7883 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7884 
   7885 		/* Sync the DMA map. */
   7886 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7887 		    BUS_DMASYNC_PREWRITE);
   7888 
   7889 		/* Initialize the transmit descriptor. */
   7890 		for (nexttx = txq->txq_next, seg = 0;
   7891 		     seg < dmamap->dm_nsegs; seg++) {
   7892 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7893 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7894 			     seglen != 0;
   7895 			     curaddr += curlen, seglen -= curlen,
   7896 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7897 				curlen = seglen;
   7898 
   7899 				/*
   7900 				 * So says the Linux driver:
   7901 				 * Work around for premature descriptor
   7902 				 * write-backs in TSO mode.  Append a
   7903 				 * 4-byte sentinel descriptor.
   7904 				 */
   7905 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7906 				    curlen > 8)
   7907 					curlen -= 4;
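        				/*
        				 * E.g. (illustrative): a final 100-byte
        				 * TSO segment is emitted as 96 + 4; the
        				 * trailing 4-byte descriptor is the
        				 * sentinel counted in segs_needed above.
        				 */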
   7908 
   7909 				wm_set_dma_addr(
   7910 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7911 				txq->txq_descs[nexttx].wtx_cmdlen
   7912 				    = htole32(cksumcmd | curlen);
   7913 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7914 				    = 0;
   7915 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7916 				    = cksumfields;
   7917 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7918 				lasttx = nexttx;
   7919 
   7920 				DPRINTF(WM_DEBUG_TX,
   7921 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7922 					"len %#04zx\n",
   7923 					device_xname(sc->sc_dev), nexttx,
   7924 					(uint64_t)curaddr, curlen));
   7925 			}
   7926 		}
   7927 
   7928 		KASSERT(lasttx != -1);
   7929 
   7930 		/*
   7931 		 * Set up the command byte on the last descriptor of
   7932 		 * the packet. If we're in the interrupt delay window,
   7933 		 * delay the interrupt.
   7934 		 */
   7935 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7936 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7937 
   7938 		/*
   7939 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7940 		 * up the descriptor to encapsulate the packet for us.
   7941 		 *
   7942 		 * This is only valid on the last descriptor of the packet.
   7943 		 */
   7944 		if (vlan_has_tag(m0)) {
   7945 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7946 			    htole32(WTX_CMD_VLE);
   7947 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7948 			    = htole16(vlan_get_tag(m0));
   7949 		}
   7950 
   7951 		txs->txs_lastdesc = lasttx;
   7952 
   7953 		DPRINTF(WM_DEBUG_TX,
   7954 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7955 			device_xname(sc->sc_dev),
   7956 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7957 
   7958 		/* Sync the descriptors we're using. */
   7959 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7960 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7961 
   7962 		/* Give the packet to the chip. */
   7963 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7964 
   7965 		DPRINTF(WM_DEBUG_TX,
   7966 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7967 
   7968 		DPRINTF(WM_DEBUG_TX,
   7969 		    ("%s: TX: finished transmitting packet, job %d\n",
   7970 			device_xname(sc->sc_dev), txq->txq_snext));
   7971 
   7972 		/* Advance the tx pointer. */
   7973 		txq->txq_free -= txs->txs_ndesc;
   7974 		txq->txq_next = nexttx;
   7975 
   7976 		txq->txq_sfree--;
   7977 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7978 
   7979 		/* Pass the packet to any BPF listeners. */
   7980 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7981 	}
   7982 
   7983 	if (m0 != NULL) {
   7984 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7985 		WM_Q_EVCNT_INCR(txq, descdrop);
   7986 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7987 			__func__));
   7988 		m_freem(m0);
   7989 	}
   7990 
   7991 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7992 		/* No more slots; notify upper layer. */
   7993 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7994 	}
   7995 
   7996 	if (txq->txq_free != ofree) {
   7997 		/* Set a watchdog timer in case the chip flakes out. */
   7998 		txq->txq_lastsent = time_uptime;
   7999 		txq->txq_sending = true;
   8000 	}
   8001 }
   8002 
   8003 /*
   8004  * wm_nq_tx_offload:
   8005  *
   8006  *	Set up TCP/IP checksumming parameters for the
   8007  *	specified packet, for NEWQUEUE devices
   8008  */
   8009 static void
   8010 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8011     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8012 {
   8013 	struct mbuf *m0 = txs->txs_mbuf;
   8014 	uint32_t vl_len, mssidx, cmdc;
   8015 	struct ether_header *eh;
   8016 	int offset, iphl;
   8017 
   8018 	/*
   8019 	 * XXX It would be nice if the mbuf pkthdr had offset
   8020 	 * fields for the protocol headers.
   8021 	 */
   8022 	*cmdlenp = 0;
   8023 	*fieldsp = 0;
   8024 
   8025 	eh = mtod(m0, struct ether_header *);
   8026 	switch (ntohs(eh->ether_type)) {
   8027 	case ETHERTYPE_IP:
   8028 	case ETHERTYPE_IPV6:
   8029 		offset = ETHER_HDR_LEN;
   8030 		break;
   8031 
   8032 	case ETHERTYPE_VLAN:
   8033 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8034 		break;
   8035 
   8036 	default:
   8037 		/* Don't support this protocol or encapsulation. */
   8038 		*do_csum = false;
   8039 		return;
   8040 	}
   8041 	*do_csum = true;
   8042 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8043 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8044 
   8045 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8046 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8047 
   8048 	if ((m0->m_pkthdr.csum_flags &
   8049 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8050 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8051 	} else {
   8052 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8053 	}
   8054 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8055 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
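        	/*
        	 * Illustrative packing (assumed header sizes): an untagged
        	 * IPv4/TCP packet has MACLEN 14 (ETHER_HDR_LEN) and IPLEN 20
        	 * (an option-less struct ip), so at this point
        	 * vl_len == (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
        	 *     (20 << NQTXC_VLLEN_IPLEN_SHIFT).
        	 */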
   8056 
   8057 	if (vlan_has_tag(m0)) {
   8058 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8059 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8060 		*cmdlenp |= NQTX_CMD_VLE;
   8061 	}
   8062 
   8063 	mssidx = 0;
   8064 
   8065 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8066 		int hlen = offset + iphl;
   8067 		int tcp_hlen;
   8068 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8069 
   8070 		if (__predict_false(m0->m_len <
   8071 				    (hlen + sizeof(struct tcphdr)))) {
   8072 			/*
   8073 			 * TCP/IP headers are not in the first mbuf; we need
   8074 			 * to do this the slow and painful way. Let's just
   8075 			 * hope this doesn't happen very often.
   8076 			 */
   8077 			struct tcphdr th;
   8078 
   8079 			WM_Q_EVCNT_INCR(txq, tsopain);
   8080 
   8081 			m_copydata(m0, hlen, sizeof(th), &th);
   8082 			if (v4) {
   8083 				struct ip ip;
   8084 
   8085 				m_copydata(m0, offset, sizeof(ip), &ip);
   8086 				ip.ip_len = 0;
   8087 				m_copyback(m0,
   8088 				    offset + offsetof(struct ip, ip_len),
   8089 				    sizeof(ip.ip_len), &ip.ip_len);
   8090 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8091 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8092 			} else {
   8093 				struct ip6_hdr ip6;
   8094 
   8095 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8096 				ip6.ip6_plen = 0;
   8097 				m_copyback(m0,
   8098 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8099 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8100 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8101 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8102 			}
   8103 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8104 			    sizeof(th.th_sum), &th.th_sum);
   8105 
   8106 			tcp_hlen = th.th_off << 2;
   8107 		} else {
   8108 			/*
   8109 			 * TCP/IP headers are in the first mbuf; we can do
   8110 			 * this the easy way.
   8111 			 */
   8112 			struct tcphdr *th;
   8113 
   8114 			if (v4) {
   8115 				struct ip *ip =
   8116 				    (void *)(mtod(m0, char *) + offset);
   8117 				th = (void *)(mtod(m0, char *) + hlen);
   8118 
   8119 				ip->ip_len = 0;
   8120 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8121 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8122 			} else {
   8123 				struct ip6_hdr *ip6 =
   8124 				    (void *)(mtod(m0, char *) + offset);
   8125 				th = (void *)(mtod(m0, char *) + hlen);
   8126 
   8127 				ip6->ip6_plen = 0;
   8128 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8129 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8130 			}
   8131 			tcp_hlen = th->th_off << 2;
   8132 		}
   8133 		hlen += tcp_hlen;
   8134 		*cmdlenp |= NQTX_CMD_TSE;
   8135 
   8136 		if (v4) {
   8137 			WM_Q_EVCNT_INCR(txq, tso);
   8138 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8139 		} else {
   8140 			WM_Q_EVCNT_INCR(txq, tso6);
   8141 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8142 		}
   8143 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8144 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8145 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8146 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8147 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8148 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
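        		/*
        		 * Illustrative TSO numbers (assumed values): with an
        		 * MSS of 1448 and a 20-byte TCP header, mssidx carries
        		 * 1448 in its MSS field and 20 in its L4LEN field,
        		 * while PAYLEN covers the TCP payload left after all
        		 * headers.
        		 */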
   8149 	} else {
   8150 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8151 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8152 	}
   8153 
   8154 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8155 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8156 		cmdc |= NQTXC_CMD_IP4;
   8157 	}
   8158 
   8159 	if (m0->m_pkthdr.csum_flags &
   8160 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8161 		WM_Q_EVCNT_INCR(txq, tusum);
   8162 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8163 			cmdc |= NQTXC_CMD_TCP;
   8164 		else
   8165 			cmdc |= NQTXC_CMD_UDP;
   8166 
   8167 		cmdc |= NQTXC_CMD_IP4;
   8168 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8169 	}
   8170 	if (m0->m_pkthdr.csum_flags &
   8171 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8172 		WM_Q_EVCNT_INCR(txq, tusum6);
   8173 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8174 			cmdc |= NQTXC_CMD_TCP;
   8175 		else
   8176 			cmdc |= NQTXC_CMD_UDP;
   8177 
   8178 		cmdc |= NQTXC_CMD_IP6;
   8179 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8180 	}
   8181 
   8182 	/*
   8183 	 * We don't have to write a context descriptor for every packet
   8184 	 * on NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
   8185 	 * I210 and I211); writing one context descriptor per Tx queue
   8186 	 * is enough for these controllers.
   8187 	 * Writing a context descriptor for every packet would add
   8188 	 * overhead, but it does not cause problems.
   8189 	 */
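        	/*
        	 * Offload example (illustrative): for a plain M_CSUM_TCPv4
        	 * packet, the code above leaves
        	 *	cmdc == NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_IP4 |
        	 *	    NQTXC_CMD_TCP
        	 * and *fieldsp set to NQTXD_FIELDS_TUXSM plus the PAYLEN bits,
        	 * while mssidx stays 0 because no TSO was requested.
        	 */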
   8190 	/* Fill in the context descriptor. */
   8191 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8192 	    htole32(vl_len);
   8193 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8194 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8195 	    htole32(cmdc);
   8196 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8197 	    htole32(mssidx);
   8198 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8199 	DPRINTF(WM_DEBUG_TX,
   8200 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8201 		txq->txq_next, 0, vl_len));
   8202 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8203 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8204 	txs->txs_ndesc++;
   8205 }
   8206 
   8207 /*
   8208  * wm_nq_start:		[ifnet interface function]
   8209  *
   8210  *	Start packet transmission on the interface for NEWQUEUE devices
   8211  */
   8212 static void
   8213 wm_nq_start(struct ifnet *ifp)
   8214 {
   8215 	struct wm_softc *sc = ifp->if_softc;
   8216 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8217 
   8218 #ifdef WM_MPSAFE
   8219 	KASSERT(if_is_mpsafe(ifp));
   8220 #endif
   8221 	/*
   8222 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8223 	 */
   8224 
   8225 	mutex_enter(txq->txq_lock);
   8226 	if (!txq->txq_stopping)
   8227 		wm_nq_start_locked(ifp);
   8228 	mutex_exit(txq->txq_lock);
   8229 }
   8230 
   8231 static void
   8232 wm_nq_start_locked(struct ifnet *ifp)
   8233 {
   8234 	struct wm_softc *sc = ifp->if_softc;
   8235 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8236 
   8237 	wm_nq_send_common_locked(ifp, txq, false);
   8238 }
   8239 
   8240 static int
   8241 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8242 {
   8243 	int qid;
   8244 	struct wm_softc *sc = ifp->if_softc;
   8245 	struct wm_txqueue *txq;
   8246 
   8247 	qid = wm_select_txqueue(ifp, m);
   8248 	txq = &sc->sc_queue[qid].wmq_txq;
   8249 
   8250 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8251 		m_freem(m);
   8252 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8253 		return ENOBUFS;
   8254 	}
   8255 
   8256 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8257 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8258 	if (m->m_flags & M_MCAST)
   8259 		if_statinc_ref(nsr, if_omcasts);
   8260 	IF_STAT_PUTREF(ifp);
   8261 
   8262 	/*
   8263 	 * There are two situations in which this mutex_tryenter() can
   8264 	 * fail at run time:
   8265 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   8266 	 *     (2) contention with the deferred if_start softint
   8267 	 *         (wm_handle_queue())
   8268 	 * In case (1), the last packet enqueued to txq->txq_interq is
   8269 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8270 	 * In case (2), the last packet is likewise dequeued by
   8271 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8272 	 */
   8273 	if (mutex_tryenter(txq->txq_lock)) {
   8274 		if (!txq->txq_stopping)
   8275 			wm_nq_transmit_locked(ifp, txq);
   8276 		mutex_exit(txq->txq_lock);
   8277 	}
   8278 
   8279 	return 0;
   8280 }
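
        /*
         * Caller sketch (illustrative): wm_nq_transmit() is the if_transmit
         * hook, so the network stack effectively does
         *
         *	if ((*ifp->if_transmit)(ifp, m) != 0)
         *		// m was dropped and already freed (ENOBUFS)
         *
         * The packet is hashed to a queue by wm_select_txqueue(), staged on
         * the per-queue pcq(9), and drained either right away (when the txq
         * lock is free) or later by the per-queue softint.
         */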
   8281 
   8282 static void
   8283 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8284 {
   8285 
   8286 	wm_nq_send_common_locked(ifp, txq, true);
   8287 }
   8288 
   8289 static void
   8290 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8291     bool is_transmit)
   8292 {
   8293 	struct wm_softc *sc = ifp->if_softc;
   8294 	struct mbuf *m0;
   8295 	struct wm_txsoft *txs;
   8296 	bus_dmamap_t dmamap;
   8297 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8298 	bool do_csum, sent;
   8299 	bool remap = true;
   8300 
   8301 	KASSERT(mutex_owned(txq->txq_lock));
   8302 
   8303 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8304 		return;
   8305 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8306 		return;
   8307 
   8308 	sent = false;
   8309 
   8310 	/*
   8311 	 * Loop through the send queue, setting up transmit descriptors
   8312 	 * until we drain the queue, or use up all available transmit
   8313 	 * descriptors.
   8314 	 */
   8315 	for (;;) {
   8316 		m0 = NULL;
   8317 
   8318 		/* Get a work queue entry. */
   8319 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8320 			wm_txeof(txq, UINT_MAX);
   8321 			if (txq->txq_sfree == 0) {
   8322 				DPRINTF(WM_DEBUG_TX,
   8323 				    ("%s: TX: no free job descriptors\n",
   8324 					device_xname(sc->sc_dev)));
   8325 				WM_Q_EVCNT_INCR(txq, txsstall);
   8326 				break;
   8327 			}
   8328 		}
   8329 
   8330 		/* Grab a packet off the queue. */
   8331 		if (is_transmit)
   8332 			m0 = pcq_get(txq->txq_interq);
   8333 		else
   8334 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8335 		if (m0 == NULL)
   8336 			break;
   8337 
   8338 		DPRINTF(WM_DEBUG_TX,
   8339 		    ("%s: TX: have packet to transmit: %p\n",
   8340 		    device_xname(sc->sc_dev), m0));
   8341 
   8342 		txs = &txq->txq_soft[txq->txq_snext];
   8343 		dmamap = txs->txs_dmamap;
   8344 
   8345 		/*
   8346 		 * Load the DMA map.  If this fails, the packet either
   8347 		 * didn't fit in the allotted number of segments, or we
   8348 		 * were short on resources.  For the too-many-segments
   8349 		 * case, we simply report an error and drop the packet,
   8350 		 * since we can't sanely copy a jumbo packet to a single
   8351 		 * buffer.
   8352 		 */
   8353 retry:
   8354 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8355 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8356 		if (__predict_false(error)) {
   8357 			if (error == EFBIG) {
   8358 				if (remap == true) {
   8359 					struct mbuf *m;
   8360 
   8361 					remap = false;
   8362 					m = m_defrag(m0, M_NOWAIT);
   8363 					if (m != NULL) {
   8364 						WM_Q_EVCNT_INCR(txq, defrag);
   8365 						m0 = m;
   8366 						goto retry;
   8367 					}
   8368 				}
   8369 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8370 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8371 				    "DMA segments, dropping...\n",
   8372 				    device_xname(sc->sc_dev));
   8373 				wm_dump_mbuf_chain(sc, m0);
   8374 				m_freem(m0);
   8375 				continue;
   8376 			}
   8377 			/* Short on resources, just stop for now. */
   8378 			DPRINTF(WM_DEBUG_TX,
   8379 			    ("%s: TX: dmamap load failed: %d\n",
   8380 				device_xname(sc->sc_dev), error));
   8381 			break;
   8382 		}
   8383 
   8384 		segs_needed = dmamap->dm_nsegs;
   8385 
   8386 		/*
   8387 		 * Ensure we have enough descriptors free to describe
   8388 		 * the packet. Note, we always reserve one descriptor
   8389 		 * at the end of the ring due to the semantics of the
   8390 		 * TDT register, plus one more in the event we need
   8391 		 * to load offload context.
   8392 		 */
   8393 		if (segs_needed > txq->txq_free - 2) {
   8394 			/*
   8395 			 * Not enough free descriptors to transmit this
   8396 			 * packet.  We haven't committed anything yet,
   8397 			 * so just unload the DMA map, put the packet
   8398 			 * back on the queue, and punt. Notify the upper
   8399 			 * layer that there are no more slots left.
   8400 			 */
   8401 			DPRINTF(WM_DEBUG_TX,
   8402 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8403 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8404 				segs_needed, txq->txq_free - 1));
   8405 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8406 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8407 			WM_Q_EVCNT_INCR(txq, txdstall);
   8408 			break;
   8409 		}
   8410 
   8411 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8412 
   8413 		DPRINTF(WM_DEBUG_TX,
   8414 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8415 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8416 
   8417 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8418 
   8419 		/*
   8420 		 * Store a pointer to the packet so that we can free it
   8421 		 * later.
   8422 		 *
   8423 		 * Initially, we consider the number of descriptors the
   8424 		 * packet uses the number of DMA segments.  This may be
   8425 		 * incremented by 1 if we do checksum offload (a descriptor
   8426 		 * is used to set the checksum context).
   8427 		 */
   8428 		txs->txs_mbuf = m0;
   8429 		txs->txs_firstdesc = txq->txq_next;
   8430 		txs->txs_ndesc = segs_needed;
   8431 
   8432 		/* Set up offload parameters for this packet. */
   8433 		uint32_t cmdlen, fields, dcmdlen;
   8434 		if (m0->m_pkthdr.csum_flags &
   8435 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8436 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8437 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8438 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8439 			    &do_csum);
   8440 		} else {
   8441 			do_csum = false;
   8442 			cmdlen = 0;
   8443 			fields = 0;
   8444 		}
   8445 
   8446 		/* Sync the DMA map. */
   8447 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8448 		    BUS_DMASYNC_PREWRITE);
   8449 
   8450 		/* Initialize the first transmit descriptor. */
   8451 		nexttx = txq->txq_next;
   8452 		if (!do_csum) {
   8453 			/* Setup a legacy descriptor */
   8454 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8455 			    dmamap->dm_segs[0].ds_addr);
   8456 			txq->txq_descs[nexttx].wtx_cmdlen =
   8457 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8458 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8459 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8460 			if (vlan_has_tag(m0)) {
   8461 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8462 				    htole32(WTX_CMD_VLE);
   8463 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8464 				    htole16(vlan_get_tag(m0));
   8465 			} else
   8466 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8467 
   8468 			dcmdlen = 0;
   8469 		} else {
   8470 			/* Setup an advanced data descriptor */
   8471 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8472 			    htole64(dmamap->dm_segs[0].ds_addr);
   8473 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8474 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8475 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8476 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8477 			    htole32(fields);
   8478 			DPRINTF(WM_DEBUG_TX,
   8479 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8480 				device_xname(sc->sc_dev), nexttx,
   8481 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8482 			DPRINTF(WM_DEBUG_TX,
   8483 			    ("\t 0x%08x%08x\n", fields,
   8484 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8485 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8486 		}
   8487 
   8488 		lasttx = nexttx;
   8489 		nexttx = WM_NEXTTX(txq, nexttx);
   8490 		/*
   8491 		 * Fill in the remaining descriptors; the legacy and
   8492 		 * advanced formats are laid out the same here.
   8493 		 */
   8494 		for (seg = 1; seg < dmamap->dm_nsegs;
   8495 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8496 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8497 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8498 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8499 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8500 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8501 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8502 			lasttx = nexttx;
   8503 
   8504 			DPRINTF(WM_DEBUG_TX,
   8505 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8506 				device_xname(sc->sc_dev), nexttx,
   8507 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8508 				dmamap->dm_segs[seg].ds_len));
   8509 		}
   8510 
   8511 		KASSERT(lasttx != -1);
   8512 
   8513 		/*
   8514 		 * Set up the command byte on the last descriptor of
   8515 		 * the packet. If we're in the interrupt delay window,
   8516 		 * delay the interrupt.
   8517 		 */
   8518 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8519 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8520 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8521 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8522 
   8523 		txs->txs_lastdesc = lasttx;
   8524 
   8525 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8526 		    device_xname(sc->sc_dev),
   8527 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8528 
   8529 		/* Sync the descriptors we're using. */
   8530 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8531 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8532 
   8533 		/* Give the packet to the chip. */
   8534 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8535 		sent = true;
   8536 
   8537 		DPRINTF(WM_DEBUG_TX,
   8538 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8539 
   8540 		DPRINTF(WM_DEBUG_TX,
   8541 		    ("%s: TX: finished transmitting packet, job %d\n",
   8542 			device_xname(sc->sc_dev), txq->txq_snext));
   8543 
   8544 		/* Advance the tx pointer. */
   8545 		txq->txq_free -= txs->txs_ndesc;
   8546 		txq->txq_next = nexttx;
   8547 
   8548 		txq->txq_sfree--;
   8549 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8550 
   8551 		/* Pass the packet to any BPF listeners. */
   8552 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8553 	}
   8554 
   8555 	if (m0 != NULL) {
   8556 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8557 		WM_Q_EVCNT_INCR(txq, descdrop);
   8558 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8559 			__func__));
   8560 		m_freem(m0);
   8561 	}
   8562 
   8563 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8564 		/* No more slots; notify upper layer. */
   8565 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8566 	}
   8567 
   8568 	if (sent) {
   8569 		/* Set a watchdog timer in case the chip flakes out. */
   8570 		txq->txq_lastsent = time_uptime;
   8571 		txq->txq_sending = true;
   8572 	}
   8573 }
   8574 
   8575 static void
   8576 wm_deferred_start_locked(struct wm_txqueue *txq)
   8577 {
   8578 	struct wm_softc *sc = txq->txq_sc;
   8579 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8580 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8581 	int qid = wmq->wmq_id;
   8582 
   8583 	KASSERT(mutex_owned(txq->txq_lock));
   8584 
   8585 	if (txq->txq_stopping) {
   8586 		mutex_exit(txq->txq_lock);
   8587 		return;
   8588 	}
   8589 
   8590 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8591 		/* XXX needed for ALTQ or single-CPU systems */
   8592 		if (qid == 0)
   8593 			wm_nq_start_locked(ifp);
   8594 		wm_nq_transmit_locked(ifp, txq);
   8595 	} else {
   8596 		/* XXX needed for ALTQ or single-CPU systems */
   8597 		if (qid == 0)
   8598 			wm_start_locked(ifp);
   8599 		wm_transmit_locked(ifp, txq);
   8600 	}
   8601 }
   8602 
   8603 /* Interrupt */
   8604 
   8605 /*
   8606  * wm_txeof:
   8607  *
   8608  *	Helper; handle transmit interrupts.
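         *
         *	Completion sketch (illustrative): WTX_CMD_RS is set on the
         *	last descriptor of each job, so the hardware writes
         *	WTX_ST_DD into that descriptor's status when the whole job
         *	has completed; only txs_lastdesc needs to be checked below.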
   8609  */
   8610 static bool
   8611 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8612 {
   8613 	struct wm_softc *sc = txq->txq_sc;
   8614 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8615 	struct wm_txsoft *txs;
   8616 	int count = 0;
   8617 	int i;
   8618 	uint8_t status;
   8619 	bool more = false;
   8620 
   8621 	KASSERT(mutex_owned(txq->txq_lock));
   8622 
   8623 	if (txq->txq_stopping)
   8624 		return false;
   8625 
   8626 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8627 
   8628 	/*
   8629 	 * Go through the Tx list and free mbufs for those
   8630 	 * frames which have been transmitted.
   8631 	 */
   8632 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8633 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8634 		if (limit-- == 0) {
   8635 			more = true;
   8636 			DPRINTF(WM_DEBUG_TX,
   8637 			    ("%s: TX: loop limited, job %d is not processed\n",
   8638 				device_xname(sc->sc_dev), i));
   8639 			break;
   8640 		}
   8641 
   8642 		txs = &txq->txq_soft[i];
   8643 
   8644 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8645 			device_xname(sc->sc_dev), i));
   8646 
   8647 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8648 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8649 
   8650 		status =
   8651 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8652 		if ((status & WTX_ST_DD) == 0) {
   8653 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8654 			    BUS_DMASYNC_PREREAD);
   8655 			break;
   8656 		}
   8657 
   8658 		count++;
   8659 		DPRINTF(WM_DEBUG_TX,
   8660 		    ("%s: TX: job %d done: descs %d..%d\n",
   8661 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8662 		    txs->txs_lastdesc));
   8663 
   8664 		/*
   8665 		 * XXX We should probably be using the statistics
   8666 		 * XXX registers, but I don't know if they exist
   8667 		 * XXX on chips before the i82544.
   8668 		 */
   8669 
   8670 #ifdef WM_EVENT_COUNTERS
   8671 		if (status & WTX_ST_TU)
   8672 			WM_Q_EVCNT_INCR(txq, underrun);
   8673 #endif /* WM_EVENT_COUNTERS */
   8674 
   8675 		/*
   8676 		 * Documents for 82574 and newer say the status field has neither
   8677 		 * an EC (Excessive Collision) bit nor an LC (Late Collision) bit;
   8678 		 * both are reserved. Refer to the "PCIe GbE Controller Open Source
   8679 		 * Software Developer's Manual", the 82574 datasheet, and newer.
   8680 		 *
   8681 		 * XXX The LC bit was observed set on I218 even though the media
   8682 		 * was full duplex, so the bit might have some other meaning
   8683 		 * (no documentation available).
   8684 		 */
   8685 
   8686 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8687 		    && ((sc->sc_type < WM_T_82574)
   8688 			|| (sc->sc_type == WM_T_80003))) {
   8689 			if_statinc(ifp, if_oerrors);
   8690 			if (status & WTX_ST_LC)
   8691 				log(LOG_WARNING, "%s: late collision\n",
   8692 				    device_xname(sc->sc_dev));
   8693 			else if (status & WTX_ST_EC) {
   8694 				if_statadd(ifp, if_collisions,
   8695 				    TX_COLLISION_THRESHOLD + 1);
   8696 				log(LOG_WARNING, "%s: excessive collisions\n",
   8697 				    device_xname(sc->sc_dev));
   8698 			}
   8699 		} else
   8700 			if_statinc(ifp, if_opackets);
   8701 
   8702 		txq->txq_packets++;
   8703 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8704 
   8705 		txq->txq_free += txs->txs_ndesc;
   8706 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8707 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8708 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8709 		m_freem(txs->txs_mbuf);
   8710 		txs->txs_mbuf = NULL;
   8711 	}
   8712 
   8713 	/* Update the dirty transmit buffer pointer. */
   8714 	txq->txq_sdirty = i;
   8715 	DPRINTF(WM_DEBUG_TX,
   8716 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8717 
   8718 	if (count != 0)
   8719 		rnd_add_uint32(&sc->rnd_source, count);
   8720 
   8721 	/*
   8722 	 * If there are no more pending transmissions, cancel the watchdog
   8723 	 * timer.
   8724 	 */
   8725 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8726 		txq->txq_sending = false;
   8727 
   8728 	return more;
   8729 }
   8730 
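        /*
         * Rx descriptor accessors (sketch): three descriptor layouts are
         * supported -- legacy (wrx_*), 82574 extended (erx_*) and NEWQUEUE
         * advanced (nqrx_*) -- so each field is read through a helper that
         * dispatches on sc_type/sc_flags, e.g. in wm_rxeof():
         *
         *	status = wm_rxdesc_get_status(rxq, i);
         *	len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
         */
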
   8731 static inline uint32_t
   8732 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8733 {
   8734 	struct wm_softc *sc = rxq->rxq_sc;
   8735 
   8736 	if (sc->sc_type == WM_T_82574)
   8737 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8738 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8739 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8740 	else
   8741 		return rxq->rxq_descs[idx].wrx_status;
   8742 }
   8743 
   8744 static inline uint32_t
   8745 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8746 {
   8747 	struct wm_softc *sc = rxq->rxq_sc;
   8748 
   8749 	if (sc->sc_type == WM_T_82574)
   8750 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8751 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8752 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8753 	else
   8754 		return rxq->rxq_descs[idx].wrx_errors;
   8755 }
   8756 
   8757 static inline uint16_t
   8758 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8759 {
   8760 	struct wm_softc *sc = rxq->rxq_sc;
   8761 
   8762 	if (sc->sc_type == WM_T_82574)
   8763 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8764 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8765 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8766 	else
   8767 		return rxq->rxq_descs[idx].wrx_special;
   8768 }
   8769 
   8770 static inline int
   8771 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8772 {
   8773 	struct wm_softc *sc = rxq->rxq_sc;
   8774 
   8775 	if (sc->sc_type == WM_T_82574)
   8776 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8777 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8778 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8779 	else
   8780 		return rxq->rxq_descs[idx].wrx_len;
   8781 }
   8782 
   8783 #ifdef WM_DEBUG
   8784 static inline uint32_t
   8785 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8786 {
   8787 	struct wm_softc *sc = rxq->rxq_sc;
   8788 
   8789 	if (sc->sc_type == WM_T_82574)
   8790 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8791 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8792 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8793 	else
   8794 		return 0;
   8795 }
   8796 
   8797 static inline uint8_t
   8798 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8799 {
   8800 	struct wm_softc *sc = rxq->rxq_sc;
   8801 
   8802 	if (sc->sc_type == WM_T_82574)
   8803 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8804 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8805 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8806 	else
   8807 		return 0;
   8808 }
   8809 #endif /* WM_DEBUG */
   8810 
   8811 static inline bool
   8812 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8813     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8814 {
   8815 
   8816 	if (sc->sc_type == WM_T_82574)
   8817 		return (status & ext_bit) != 0;
   8818 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8819 		return (status & nq_bit) != 0;
   8820 	else
   8821 		return (status & legacy_bit) != 0;
   8822 }
   8823 
   8824 static inline bool
   8825 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8826     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8827 {
   8828 
   8829 	if (sc->sc_type == WM_T_82574)
   8830 		return (error & ext_bit) != 0;
   8831 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8832 		return (error & nq_bit) != 0;
   8833 	else
   8834 		return (error & legacy_bit) != 0;
   8835 }
   8836 
   8837 static inline bool
   8838 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8839 {
   8840 
   8841 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8842 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8843 		return true;
   8844 	else
   8845 		return false;
   8846 }
   8847 
   8848 static inline bool
   8849 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8850 {
   8851 	struct wm_softc *sc = rxq->rxq_sc;
   8852 
   8853 	/* XXX missing error bit for newqueue? */
   8854 	if (wm_rxdesc_is_set_error(sc, errors,
   8855 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8856 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8857 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8858 		NQRXC_ERROR_RXE)) {
   8859 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8860 		    EXTRXC_ERROR_SE, 0))
   8861 			log(LOG_WARNING, "%s: symbol error\n",
   8862 			    device_xname(sc->sc_dev));
   8863 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8864 		    EXTRXC_ERROR_SEQ, 0))
   8865 			log(LOG_WARNING, "%s: receive sequence error\n",
   8866 			    device_xname(sc->sc_dev));
   8867 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8868 		    EXTRXC_ERROR_CE, 0))
   8869 			log(LOG_WARNING, "%s: CRC error\n",
   8870 			    device_xname(sc->sc_dev));
   8871 		return true;
   8872 	}
   8873 
   8874 	return false;
   8875 }
   8876 
   8877 static inline bool
   8878 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8879 {
   8880 	struct wm_softc *sc = rxq->rxq_sc;
   8881 
   8882 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8883 		NQRXC_STATUS_DD)) {
   8884 		/* We have processed all of the receive descriptors. */
   8885 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8886 		return false;
   8887 	}
   8888 
   8889 	return true;
   8890 }
   8891 
   8892 static inline bool
   8893 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8894     uint16_t vlantag, struct mbuf *m)
   8895 {
   8896 
   8897 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8898 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8899 		vlan_set_tag(m, le16toh(vlantag));
   8900 	}
   8901 
   8902 	return true;
   8903 }
   8904 
   8905 static inline void
   8906 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8907     uint32_t errors, struct mbuf *m)
   8908 {
   8909 	struct wm_softc *sc = rxq->rxq_sc;
   8910 
   8911 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8912 		if (wm_rxdesc_is_set_status(sc, status,
   8913 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8914 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8915 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8916 			if (wm_rxdesc_is_set_error(sc, errors,
   8917 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8918 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8919 		}
   8920 		if (wm_rxdesc_is_set_status(sc, status,
   8921 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8922 			/*
   8923 			 * Note: we don't know if this was TCP or UDP,
   8924 			 * so we just set both bits, and expect the
   8925 			 * upper layers to deal.
   8926 			 */
   8927 			WM_Q_EVCNT_INCR(rxq, tusum);
   8928 			m->m_pkthdr.csum_flags |=
   8929 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8930 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8931 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8932 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8933 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8934 		}
   8935 	}
   8936 }
   8937 
   8938 /*
   8939  * wm_rxeof:
   8940  *
   8941  *	Helper; handle receive interrupts.
   8942  */
   8943 static bool
   8944 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8945 {
   8946 	struct wm_softc *sc = rxq->rxq_sc;
   8947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8948 	struct wm_rxsoft *rxs;
   8949 	struct mbuf *m;
   8950 	int i, len;
   8951 	int count = 0;
   8952 	uint32_t status, errors;
   8953 	uint16_t vlantag;
   8954 	bool more = false;
   8955 
   8956 	KASSERT(mutex_owned(rxq->rxq_lock));
   8957 
   8958 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8959 		if (limit-- == 0) {
   8960 			rxq->rxq_ptr = i;
   8961 			more = true;
   8962 			DPRINTF(WM_DEBUG_RX,
   8963 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8964 				device_xname(sc->sc_dev), i));
   8965 			break;
   8966 		}
   8967 
   8968 		rxs = &rxq->rxq_soft[i];
   8969 
   8970 		DPRINTF(WM_DEBUG_RX,
   8971 		    ("%s: RX: checking descriptor %d\n",
   8972 			device_xname(sc->sc_dev), i));
   8973 		wm_cdrxsync(rxq, i,
   8974 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8975 
   8976 		status = wm_rxdesc_get_status(rxq, i);
   8977 		errors = wm_rxdesc_get_errors(rxq, i);
   8978 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8979 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8980 #ifdef WM_DEBUG
   8981 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8982 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8983 #endif
   8984 
   8985 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8986 			/*
   8987 			 * Update the receive pointer while holding rxq_lock,
   8988 			 * keeping it consistent with the counters.
   8989 			 */
   8990 			rxq->rxq_ptr = i;
   8991 			break;
   8992 		}
   8993 
   8994 		count++;
   8995 		if (__predict_false(rxq->rxq_discard)) {
   8996 			DPRINTF(WM_DEBUG_RX,
   8997 			    ("%s: RX: discarding contents of descriptor %d\n",
   8998 				device_xname(sc->sc_dev), i));
   8999 			wm_init_rxdesc(rxq, i);
   9000 			if (wm_rxdesc_is_eop(rxq, status)) {
   9001 				/* Reset our state. */
   9002 				DPRINTF(WM_DEBUG_RX,
   9003 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9004 					device_xname(sc->sc_dev)));
   9005 				rxq->rxq_discard = 0;
   9006 			}
   9007 			continue;
   9008 		}
   9009 
   9010 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9011 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9012 
   9013 		m = rxs->rxs_mbuf;
   9014 
   9015 		/*
   9016 		 * Add a new receive buffer to the ring, unless of
   9017 		 * course the length is zero. Treat the latter as a
   9018 		 * failed mapping.
   9019 		 */
   9020 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9021 			/*
   9022 			 * Failed, throw away what we've done so
   9023 			 * far, and discard the rest of the packet.
   9024 			 */
   9025 			if_statinc(ifp, if_ierrors);
   9026 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9027 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9028 			wm_init_rxdesc(rxq, i);
   9029 			if (!wm_rxdesc_is_eop(rxq, status))
   9030 				rxq->rxq_discard = 1;
   9031 			if (rxq->rxq_head != NULL)
   9032 				m_freem(rxq->rxq_head);
   9033 			WM_RXCHAIN_RESET(rxq);
   9034 			DPRINTF(WM_DEBUG_RX,
   9035 			    ("%s: RX: Rx buffer allocation failed, "
   9036 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9037 				rxq->rxq_discard ? " (discard)" : ""));
   9038 			continue;
   9039 		}
   9040 
   9041 		m->m_len = len;
   9042 		rxq->rxq_len += len;
   9043 		DPRINTF(WM_DEBUG_RX,
   9044 		    ("%s: RX: buffer at %p len %d\n",
   9045 			device_xname(sc->sc_dev), m->m_data, len));
   9046 
   9047 		/* If this is not the end of the packet, keep looking. */
   9048 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9049 			WM_RXCHAIN_LINK(rxq, m);
   9050 			DPRINTF(WM_DEBUG_RX,
   9051 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9052 				device_xname(sc->sc_dev), rxq->rxq_len));
   9053 			continue;
   9054 		}
   9055 
   9056 		/*
   9057 		 * Okay, we have the entire packet now. The chip is
   9058 		 * configured to include the FCS except on I350, I354 and
   9059 		 * I21[01] (not all chips can be configured to strip it),
   9060 		 * so we need to trim it. We may also need to adjust the
   9061 		 * length of the previous mbuf in the chain if the current
   9062 		 * mbuf is too short.
   9063 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
   9064 		 * register is always set on I350, so we don't trim there.
   9065 		 */
   9066 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9067 		    && (sc->sc_type != WM_T_I210)
   9068 		    && (sc->sc_type != WM_T_I211)) {
   9069 			if (m->m_len < ETHER_CRC_LEN) {
   9070 				rxq->rxq_tail->m_len
   9071 				    -= (ETHER_CRC_LEN - m->m_len);
   9072 				m->m_len = 0;
   9073 			} else
   9074 				m->m_len -= ETHER_CRC_LEN;
   9075 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9076 		} else
   9077 			len = rxq->rxq_len;
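        		/*
        		 * Trim example (illustrative): if the FCS straddles
        		 * mbufs so that the last mbuf holds only 2 of the 4
        		 * CRC bytes, the code above zeroes that mbuf's length
        		 * and shaves the remaining 2 bytes off the previous
        		 * mbuf in the chain.
        		 */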
   9078 
   9079 		WM_RXCHAIN_LINK(rxq, m);
   9080 
   9081 		*rxq->rxq_tailp = NULL;
   9082 		m = rxq->rxq_head;
   9083 
   9084 		WM_RXCHAIN_RESET(rxq);
   9085 
   9086 		DPRINTF(WM_DEBUG_RX,
   9087 		    ("%s: RX: have entire packet, len -> %d\n",
   9088 			device_xname(sc->sc_dev), len));
   9089 
   9090 		/* If an error occurred, update stats and drop the packet. */
   9091 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9092 			m_freem(m);
   9093 			continue;
   9094 		}
   9095 
   9096 		/* No errors.  Receive the packet. */
   9097 		m_set_rcvif(m, ifp);
   9098 		m->m_pkthdr.len = len;
   9099 		/*
   9100 		 * TODO:
   9101 		 * we should save the rsshash and rsstype in this mbuf.
   9102 		 */
   9103 		DPRINTF(WM_DEBUG_RX,
   9104 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9105 			device_xname(sc->sc_dev), rsstype, rsshash));
   9106 
   9107 		/*
   9108 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9109 		 * for us.  Associate the tag with the packet.
   9110 		 */
   9111 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9112 			continue;
   9113 
   9114 		/* Set up checksum info for this packet. */
   9115 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9116 		/*
   9117 		 * Update the receive pointer while holding rxq_lock,
   9118 		 * keeping it consistent with the counters.
   9119 		 */
   9120 		rxq->rxq_ptr = i;
   9121 		rxq->rxq_packets++;
   9122 		rxq->rxq_bytes += len;
   9123 		mutex_exit(rxq->rxq_lock);
   9124 
   9125 		/* Pass it on. */
   9126 		if_percpuq_enqueue(sc->sc_ipq, m);
   9127 
   9128 		mutex_enter(rxq->rxq_lock);
   9129 
   9130 		if (rxq->rxq_stopping)
   9131 			break;
   9132 	}
   9133 
   9134 	if (count != 0)
   9135 		rnd_add_uint32(&sc->rnd_source, count);
   9136 
   9137 	DPRINTF(WM_DEBUG_RX,
   9138 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9139 
   9140 	return more;
   9141 }
   9142 
   9143 /*
   9144  * wm_linkintr_gmii:
   9145  *
   9146  *	Helper; handle link interrupts for GMII.
   9147  */
   9148 static void
   9149 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9150 {
   9151 	device_t dev = sc->sc_dev;
   9152 	uint32_t status, reg;
   9153 	bool link;
   9154 	int rv;
   9155 
   9156 	KASSERT(WM_CORE_LOCKED(sc));
   9157 
   9158 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9159 		__func__));
   9160 
   9161 	if ((icr & ICR_LSC) == 0) {
   9162 		if (icr & ICR_RXSEQ)
   9163 			DPRINTF(WM_DEBUG_LINK,
   9164 			    ("%s: LINK Receive sequence error\n",
   9165 				device_xname(dev)));
   9166 		return;
   9167 	}
   9168 
   9169 	/* Link status changed */
   9170 	status = CSR_READ(sc, WMREG_STATUS);
   9171 	link = status & STATUS_LU;
   9172 	if (link) {
   9173 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9174 			device_xname(dev),
   9175 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9176 	} else {
   9177 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9178 			device_xname(dev)));
   9179 	}
   9180 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9181 		wm_gig_downshift_workaround_ich8lan(sc);
   9182 
   9183 	if ((sc->sc_type == WM_T_ICH8)
   9184 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9185 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9186 	}
   9187 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9188 		device_xname(dev)));
   9189 	mii_pollstat(&sc->sc_mii);
   9190 	if (sc->sc_type == WM_T_82543) {
   9191 		int miistatus, active;
   9192 
   9193 		/*
   9194 		 * With 82543, we need to force speed and
   9195 		 * duplex on the MAC equal to what the PHY
   9196 		 * speed and duplex configuration is.
   9197 		 */
   9198 		miistatus = sc->sc_mii.mii_media_status;
   9199 
   9200 		if (miistatus & IFM_ACTIVE) {
   9201 			active = sc->sc_mii.mii_media_active;
   9202 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9203 			switch (IFM_SUBTYPE(active)) {
   9204 			case IFM_10_T:
   9205 				sc->sc_ctrl |= CTRL_SPEED_10;
   9206 				break;
   9207 			case IFM_100_TX:
   9208 				sc->sc_ctrl |= CTRL_SPEED_100;
   9209 				break;
   9210 			case IFM_1000_T:
   9211 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9212 				break;
   9213 			default:
   9214 				/*
   9215 				 * Fiber?
   9216 				 * We should not get here.
   9217 				 */
   9218 				device_printf(dev, "unknown media (%x)\n",
   9219 				    active);
   9220 				break;
   9221 			}
   9222 			if (active & IFM_FDX)
   9223 				sc->sc_ctrl |= CTRL_FD;
   9224 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9225 		}
   9226 	} else if (sc->sc_type == WM_T_PCH) {
   9227 		wm_k1_gig_workaround_hv(sc,
   9228 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9229 	}
   9230 
   9231 	/*
   9232 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9233 	 * aggressive resulting in many collisions. To avoid this, increase
   9234 	 * the IPG and reduce Rx latency in the PHY.
   9235 	 */
   9236 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9237 	    && link) {
   9238 		uint32_t tipg_reg;
   9239 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9240 		bool fdx;
   9241 		uint16_t emi_addr, emi_val;
   9242 
   9243 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9244 		tipg_reg &= ~TIPG_IPGT_MASK;
   9245 		fdx = status & STATUS_FD;
   9246 
   9247 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9248 			tipg_reg |= 0xff;
   9249 			/* Reduce Rx latency in analog PHY */
   9250 			emi_val = 0;
   9251 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9252 		    fdx && speed != STATUS_SPEED_1000) {
   9253 			tipg_reg |= 0xc;
   9254 			emi_val = 1;
   9255 		} else {
   9256 			/* Roll back the default values */
   9257 			tipg_reg |= 0x08;
   9258 			emi_val = 1;
   9259 		}
   9260 
   9261 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9262 
   9263 		rv = sc->phy.acquire(sc);
   9264 		if (rv)
   9265 			return;
   9266 
   9267 		if (sc->sc_type == WM_T_PCH2)
   9268 			emi_addr = I82579_RX_CONFIG;
   9269 		else
   9270 			emi_addr = I217_RX_CONFIG;
   9271 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9272 
   9273 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9274 			uint16_t phy_reg;
   9275 
   9276 			sc->phy.readreg_locked(dev, 2,
   9277 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9278 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9279 			if (speed == STATUS_SPEED_100
   9280 			    || speed == STATUS_SPEED_10)
   9281 				phy_reg |= 0x3e8;
   9282 			else
   9283 				phy_reg |= 0xfa;
   9284 			sc->phy.writereg_locked(dev, 2,
   9285 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9286 
   9287 			if (speed == STATUS_SPEED_1000) {
   9288 				sc->phy.readreg_locked(dev, 2,
   9289 				    HV_PM_CTRL, &phy_reg);
   9290 
   9291 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9292 
   9293 				sc->phy.writereg_locked(dev, 2,
   9294 				    HV_PM_CTRL, phy_reg);
   9295 			}
   9296 		}
   9297 		sc->phy.release(sc);
   9298 
   9299 		if (rv)
   9300 			return;
   9301 
   9302 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9303 			uint16_t data, ptr_gap;
   9304 
   9305 			if (speed == STATUS_SPEED_1000) {
   9306 				rv = sc->phy.acquire(sc);
   9307 				if (rv)
   9308 					return;
   9309 
   9310 				rv = sc->phy.readreg_locked(dev, 2,
   9311 				    I219_UNKNOWN1, &data);
   9312 				if (rv) {
   9313 					sc->phy.release(sc);
   9314 					return;
   9315 				}
   9316 
   9317 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9318 				if (ptr_gap < 0x18) {
   9319 					data &= ~(0x3ff << 2);
   9320 					data |= (0x18 << 2);
   9321 					rv = sc->phy.writereg_locked(dev,
   9322 					    2, I219_UNKNOWN1, data);
   9323 				}
   9324 				sc->phy.release(sc);
   9325 				if (rv)
   9326 					return;
   9327 			} else {
   9328 				rv = sc->phy.acquire(sc);
   9329 				if (rv)
   9330 					return;
   9331 
   9332 				rv = sc->phy.writereg_locked(dev, 2,
   9333 				    I219_UNKNOWN1, 0xc023);
   9334 				sc->phy.release(sc);
   9335 				if (rv)
   9336 					return;
   9337 
   9338 			}
   9339 		}
   9340 	}
   9341 
   9342 	/*
   9343 	 * I217 Packet Loss issue:
   9344 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9345 	 * on power up.
   9346 	 * Set the Beacon Duration for I217 to 8 usec
   9347 	 */
   9348 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9349 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9350 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9351 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9352 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9353 	}
   9354 
   9355 	/* Work-around I218 hang issue */
   9356 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9357 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9358 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9359 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9360 		wm_k1_workaround_lpt_lp(sc, link);
   9361 
   9362 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9363 		/*
   9364 		 * Set platform power management values for Latency
   9365 		 * Tolerance Reporting (LTR)
   9366 		 */
   9367 		wm_platform_pm_pch_lpt(sc,
   9368 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9369 	}
   9370 
   9371 	/* Clear link partner's EEE ability */
   9372 	sc->eee_lp_ability = 0;
   9373 
   9374 	/* FEXTNVM6 K1-off workaround */
   9375 	if (sc->sc_type == WM_T_PCH_SPT) {
   9376 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9377 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9378 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9379 		else
   9380 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9381 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9382 	}
   9383 
   9384 	if (!link)
   9385 		return;
   9386 
   9387 	switch (sc->sc_type) {
   9388 	case WM_T_PCH2:
   9389 		wm_k1_workaround_lv(sc);
   9390 		/* FALLTHROUGH */
   9391 	case WM_T_PCH:
   9392 		if (sc->sc_phytype == WMPHY_82578)
   9393 			wm_link_stall_workaround_hv(sc);
   9394 		break;
   9395 	default:
   9396 		break;
   9397 	}
   9398 
   9399 	/* Enable/Disable EEE after link up */
   9400 	if (sc->sc_phytype > WMPHY_82579)
   9401 		wm_set_eee_pchlan(sc);
   9402 }
   9403 
   9404 /*
   9405  * wm_linkintr_tbi:
   9406  *
   9407  *	Helper; handle link interrupts for TBI mode.
   9408  */
   9409 static void
   9410 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9411 {
   9412 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9413 	uint32_t status;
   9414 
   9415 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9416 		__func__));
   9417 
   9418 	status = CSR_READ(sc, WMREG_STATUS);
   9419 	if (icr & ICR_LSC) {
   9420 		wm_check_for_link(sc);
   9421 		if (status & STATUS_LU) {
   9422 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9423 				device_xname(sc->sc_dev),
   9424 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9425 			/*
   9426 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   9427 			 * automatically, so refresh the cached sc->sc_ctrl.
   9428 			 */
   9429 
   9430 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9431 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9432 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9433 			if (status & STATUS_FD)
   9434 				sc->sc_tctl |=
   9435 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9436 			else
   9437 				sc->sc_tctl |=
   9438 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9439 			if (sc->sc_ctrl & CTRL_TFCE)
   9440 				sc->sc_fcrtl |= FCRTL_XONE;
   9441 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9442 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9443 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9444 			sc->sc_tbi_linkup = 1;
   9445 			if_link_state_change(ifp, LINK_STATE_UP);
   9446 		} else {
   9447 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9448 				device_xname(sc->sc_dev)));
   9449 			sc->sc_tbi_linkup = 0;
   9450 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9451 		}
   9452 		/* Update LED */
   9453 		wm_tbi_serdes_set_linkled(sc);
   9454 	} else if (icr & ICR_RXSEQ)
   9455 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9456 			device_xname(sc->sc_dev)));
   9457 }
   9458 
   9459 /*
   9460  * wm_linkintr_serdes:
   9461  *
   9462  *	Helper; handle link interrupts for SERDES mode.
   9463  */
   9464 static void
   9465 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9466 {
   9467 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9468 	struct mii_data *mii = &sc->sc_mii;
   9469 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9470 	uint32_t pcs_adv, pcs_lpab, reg;
   9471 
   9472 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9473 		__func__));
   9474 
   9475 	if (icr & ICR_LSC) {
   9476 		/* Check PCS */
   9477 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9478 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9479 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9480 				device_xname(sc->sc_dev)));
   9481 			mii->mii_media_status |= IFM_ACTIVE;
   9482 			sc->sc_tbi_linkup = 1;
   9483 			if_link_state_change(ifp, LINK_STATE_UP);
   9484 		} else {
   9485 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9486 				device_xname(sc->sc_dev)));
   9487 			mii->mii_media_active |= IFM_NONE;
   9488 			sc->sc_tbi_linkup = 0;
   9489 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9490 			wm_tbi_serdes_set_linkled(sc);
   9491 			return;
   9492 		}
   9493 		mii->mii_media_active |= IFM_1000_SX;
   9494 		if ((reg & PCS_LSTS_FDX) != 0)
   9495 			mii->mii_media_active |= IFM_FDX;
   9496 		else
   9497 			mii->mii_media_active |= IFM_HDX;
   9498 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9499 			/* Check flow */
   9500 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9501 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9502 				DPRINTF(WM_DEBUG_LINK,
   9503 				    ("XXX LINKOK but not ACOMP\n"));
   9504 				return;
   9505 			}
   9506 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9507 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9508 			DPRINTF(WM_DEBUG_LINK,
   9509 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
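        			/*
        			 * Resolve flow control as in IEEE 802.3 Annex
        			 * 28B: symmetric PAUSE advertised on both sides
        			 * enables pause in both directions, while the
        			 * asymmetric combinations below enable Tx-only
        			 * or Rx-only pause, respectively.
        			 */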
   9510 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9511 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9512 				mii->mii_media_active |= IFM_FLOW
   9513 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9514 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9515 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9516 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9517 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9518 				mii->mii_media_active |= IFM_FLOW
   9519 				    | IFM_ETH_TXPAUSE;
   9520 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9521 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9522 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9523 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9524 				mii->mii_media_active |= IFM_FLOW
   9525 				    | IFM_ETH_RXPAUSE;
   9526 		}
   9527 		/* Update LED */
   9528 		wm_tbi_serdes_set_linkled(sc);
   9529 	} else
   9530 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9531 		    device_xname(sc->sc_dev)));
   9532 }
   9533 
   9534 /*
   9535  * wm_linkintr:
   9536  *
   9537  *	Helper; handle link interrupts.
   9538  */
   9539 static void
   9540 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9541 {
   9542 
   9543 	KASSERT(WM_CORE_LOCKED(sc));
   9544 
   9545 	if (sc->sc_flags & WM_F_HAS_MII)
   9546 		wm_linkintr_gmii(sc, icr);
   9547 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9548 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9549 		wm_linkintr_serdes(sc, icr);
   9550 	else
   9551 		wm_linkintr_tbi(sc, icr);
   9552 }
   9553 
   9554 
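        /*
         * Deferred Tx/Rx processing for a queue runs either as a softint or
         * on a workqueue, selected per queue by wmq_txrx_use_workqueue.
         * Roughly, the trade-off is that softints run with low latency but
         * may not sleep, while the workqueue thread can yield the CPU under
         * sustained load.
         */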
   9555 static inline void
   9556 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9557 {
   9558 
   9559 	if (wmq->wmq_txrx_use_workqueue)
   9560 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9561 	else
   9562 		softint_schedule(wmq->wmq_si);
   9563 }
   9564 
   9565 /*
   9566  * wm_intr_legacy:
   9567  *
   9568  *	Interrupt service routine for INTx and MSI.
   9569  */
   9570 static int
   9571 wm_intr_legacy(void *arg)
   9572 {
   9573 	struct wm_softc *sc = arg;
   9574 	struct wm_queue *wmq = &sc->sc_queue[0];
   9575 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9576 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9577 	uint32_t icr, rndval = 0;
   9578 	int handled = 0;
   9579 
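        	/*
        	 * ICR is clear-on-read on these devices, so loop: each read
        	 * returns and clears the pending causes, and we keep servicing
        	 * until none of the causes we enabled (sc_icr) remain set.
        	 */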
   9580 	while (1 /* CONSTCOND */) {
   9581 		icr = CSR_READ(sc, WMREG_ICR);
   9582 		if ((icr & sc->sc_icr) == 0)
   9583 			break;
   9584 		if (handled == 0)
   9585 			DPRINTF(WM_DEBUG_TX,
   9586 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9587 		if (rndval == 0)
   9588 			rndval = icr;
   9589 
   9590 		mutex_enter(rxq->rxq_lock);
   9591 
   9592 		if (rxq->rxq_stopping) {
   9593 			mutex_exit(rxq->rxq_lock);
   9594 			break;
   9595 		}
   9596 
   9597 		handled = 1;
   9598 
   9599 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9600 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9601 			DPRINTF(WM_DEBUG_RX,
   9602 			    ("%s: RX: got Rx intr 0x%08x\n",
   9603 				device_xname(sc->sc_dev),
   9604 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9605 			WM_Q_EVCNT_INCR(rxq, intr);
   9606 		}
   9607 #endif
   9608 		/*
   9609 		 * wm_rxeof() does *not* call upper layer functions directly,
   9610 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9611 		 * So, we can call wm_rxeof() in interrupt context.
   9612 		 */
   9613 		wm_rxeof(rxq, UINT_MAX);
   9614 
   9615 		mutex_exit(rxq->rxq_lock);
   9616 		mutex_enter(txq->txq_lock);
   9617 
   9618 		if (txq->txq_stopping) {
   9619 			mutex_exit(txq->txq_lock);
   9620 			break;
   9621 		}
   9622 
   9623 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9624 		if (icr & ICR_TXDW) {
   9625 			DPRINTF(WM_DEBUG_TX,
   9626 			    ("%s: TX: got TXDW interrupt\n",
   9627 				device_xname(sc->sc_dev)));
   9628 			WM_Q_EVCNT_INCR(txq, txdw);
   9629 		}
   9630 #endif
   9631 		wm_txeof(txq, UINT_MAX);
   9632 
   9633 		mutex_exit(txq->txq_lock);
   9634 		WM_CORE_LOCK(sc);
   9635 
   9636 		if (sc->sc_core_stopping) {
   9637 			WM_CORE_UNLOCK(sc);
   9638 			break;
   9639 		}
   9640 
   9641 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9642 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9643 			wm_linkintr(sc, icr);
   9644 		}
   9645 		if ((icr & ICR_GPI(0)) != 0)
   9646 			device_printf(sc->sc_dev, "got module interrupt\n");
   9647 
   9648 		WM_CORE_UNLOCK(sc);
   9649 
   9650 		if (icr & ICR_RXO) {
   9651 #if defined(WM_DEBUG)
   9652 			log(LOG_WARNING, "%s: Receive overrun\n",
   9653 			    device_xname(sc->sc_dev));
   9654 #endif /* defined(WM_DEBUG) */
   9655 		}
   9656 	}
   9657 
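        	/* Feed the first ICR value we saw into the entropy pool. */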
   9658 	rnd_add_uint32(&sc->rnd_source, rndval);
   9659 
   9660 	if (handled) {
   9661 		/* Try to get more packets going. */
   9662 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9663 		wm_sched_handle_queue(sc, wmq);
   9664 	}
   9665 
   9666 	return handled;
   9667 }
   9668 
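        /*
         * Mask this queue's Tx/Rx interrupt.  The register layout differs by
         * generation: the 82574 masks per-queue causes in IMC, the 82575
         * masks EITR queue bits in EIMC, and the other MSI-X capable devices
         * mask one EIMC bit per interrupt vector.
         */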
   9669 static inline void
   9670 wm_txrxintr_disable(struct wm_queue *wmq)
   9671 {
   9672 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9673 
   9674 	if (sc->sc_type == WM_T_82574)
   9675 		CSR_WRITE(sc, WMREG_IMC,
   9676 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9677 	else if (sc->sc_type == WM_T_82575)
   9678 		CSR_WRITE(sc, WMREG_EIMC,
   9679 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9680 	else
   9681 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9682 }
   9683 
   9684 static inline void
   9685 wm_txrxintr_enable(struct wm_queue *wmq)
   9686 {
   9687 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9688 
   9689 	wm_itrs_calculate(sc, wmq);
   9690 
   9691 	/*
   9692 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is re-enabled
   9693 	 * here.  It does not matter whether RXQ(0) or RXQ(1) enables
   9694 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
   9695 	 * while its wm_handle_queue(wmq) is running.
   9696 	 */
   9697 	if (sc->sc_type == WM_T_82574)
   9698 		CSR_WRITE(sc, WMREG_IMS,
   9699 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9700 	else if (sc->sc_type == WM_T_82575)
   9701 		CSR_WRITE(sc, WMREG_EIMS,
   9702 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9703 	else
   9704 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9705 }
   9706 
   9707 static int
   9708 wm_txrxintr_msix(void *arg)
   9709 {
   9710 	struct wm_queue *wmq = arg;
   9711 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9712 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9713 	struct wm_softc *sc = txq->txq_sc;
   9714 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9715 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9716 	bool txmore;
   9717 	bool rxmore;
   9718 
   9719 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9720 
   9721 	DPRINTF(WM_DEBUG_TX,
   9722 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9723 
   9724 	wm_txrxintr_disable(wmq);
   9725 
   9726 	mutex_enter(txq->txq_lock);
   9727 
   9728 	if (txq->txq_stopping) {
   9729 		mutex_exit(txq->txq_lock);
   9730 		return 0;
   9731 	}
   9732 
   9733 	WM_Q_EVCNT_INCR(txq, txdw);
   9734 	txmore = wm_txeof(txq, txlimit);
   9735 	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9736 	mutex_exit(txq->txq_lock);
   9737 
   9738 	DPRINTF(WM_DEBUG_RX,
   9739 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9740 	mutex_enter(rxq->rxq_lock);
   9741 
   9742 	if (rxq->rxq_stopping) {
   9743 		mutex_exit(rxq->rxq_lock);
   9744 		return 0;
   9745 	}
   9746 
   9747 	WM_Q_EVCNT_INCR(rxq, intr);
   9748 	rxmore = wm_rxeof(rxq, rxlimit);
   9749 	mutex_exit(rxq->rxq_lock);
   9750 
   9751 	wm_itrs_writereg(sc, wmq);
   9752 
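        	/*
        	 * If either ring hit its processing limit, more work is
        	 * pending: leave the queue interrupt masked and reschedule
        	 * ourselves.  Otherwise unmask and wait for the next interrupt.
        	 */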
   9753 	if (txmore || rxmore) {
   9754 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9755 		wm_sched_handle_queue(sc, wmq);
   9756 	} else
   9757 		wm_txrxintr_enable(wmq);
   9758 
   9759 	return 1;
   9760 }
   9761 
   9762 static void
   9763 wm_handle_queue(void *arg)
   9764 {
   9765 	struct wm_queue *wmq = arg;
   9766 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9767 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9768 	struct wm_softc *sc = txq->txq_sc;
   9769 	u_int txlimit = sc->sc_tx_process_limit;
   9770 	u_int rxlimit = sc->sc_rx_process_limit;
   9771 	bool txmore;
   9772 	bool rxmore;
   9773 
   9774 	mutex_enter(txq->txq_lock);
   9775 	if (txq->txq_stopping) {
   9776 		mutex_exit(txq->txq_lock);
   9777 		return;
   9778 	}
   9779 	txmore = wm_txeof(txq, txlimit);
   9780 	wm_deferred_start_locked(txq);
   9781 	mutex_exit(txq->txq_lock);
   9782 
   9783 	mutex_enter(rxq->rxq_lock);
   9784 	if (rxq->rxq_stopping) {
   9785 		mutex_exit(rxq->rxq_lock);
   9786 		return;
   9787 	}
   9788 	WM_Q_EVCNT_INCR(rxq, defer);
   9789 	rxmore = wm_rxeof(rxq, rxlimit);
   9790 	mutex_exit(rxq->rxq_lock);
   9791 
   9792 	if (txmore || rxmore) {
   9793 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9794 		wm_sched_handle_queue(sc, wmq);
   9795 	} else
   9796 		wm_txrxintr_enable(wmq);
   9797 }
   9798 
   9799 static void
   9800 wm_handle_queue_work(struct work *wk, void *context)
   9801 {
   9802 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9803 
   9804 	/* An "enqueued" flag is not required here. */
   9807 	wm_handle_queue(wmq);
   9808 }
   9809 
   9810 /*
   9811  * wm_linkintr_msix:
   9812  *
   9813  *	Interrupt service routine for link status change for MSI-X.
   9814  */
   9815 static int
   9816 wm_linkintr_msix(void *arg)
   9817 {
   9818 	struct wm_softc *sc = arg;
   9819 	uint32_t reg;
   9820 	bool has_rxo = false;
   9821 
   9822 	reg = CSR_READ(sc, WMREG_ICR);
   9823 	WM_CORE_LOCK(sc);
   9824 	DPRINTF(WM_DEBUG_LINK,
   9825 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9826 		device_xname(sc->sc_dev), reg));
   9827 
   9828 	if (sc->sc_core_stopping)
   9829 		goto out;
   9830 
   9831 	if ((reg & ICR_LSC) != 0) {
   9832 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9833 		wm_linkintr(sc, ICR_LSC);
   9834 	}
   9835 	if ((reg & ICR_GPI(0)) != 0)
   9836 		device_printf(sc->sc_dev, "got module interrupt\n");
   9837 
   9838 	/*
   9839 	 * XXX 82574 MSI-X mode workaround
   9840 	 *
   9841 	 * In MSI-X mode, the 82574 reports receive overrun (RXO) on the
   9842 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
   9843 	 * ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
   9844 	 * interrupts by writing to WMREG_ICS to process received packets.
   9845 	 */
   9846 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9847 #if defined(WM_DEBUG)
   9848 		log(LOG_WARNING, "%s: Receive overrun\n",
   9849 		    device_xname(sc->sc_dev));
   9850 #endif /* defined(WM_DEBUG) */
   9851 
   9852 		has_rxo = true;
   9853 		/*
   9854 		 * The RXO interrupt fires at a very high rate when receive
   9855 		 * traffic is heavy, so handle ICR_OTHER in polling mode just
   9856 		 * like the Tx/Rx interrupts.  ICR_OTHER is re-enabled at the
   9857 		 * end of wm_txrxintr_msix(), which is kicked by both the
   9858 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9859 		 */
   9860 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9861 
   9862 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9863 	}
   9864 
   9867 out:
   9868 	WM_CORE_UNLOCK(sc);
   9869 
   9870 	if (sc->sc_type == WM_T_82574) {
   9871 		if (!has_rxo)
   9872 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9873 		else
   9874 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9875 	} else if (sc->sc_type == WM_T_82575)
   9876 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9877 	else
   9878 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9879 
   9880 	return 1;
   9881 }
   9882 
   9883 /*
   9884  * Media related.
   9885  * GMII, SGMII, TBI (and SERDES)
   9886  */
   9887 
   9888 /* Common */
   9889 
   9890 /*
   9891  * wm_tbi_serdes_set_linkled:
   9892  *
   9893  *	Update the link LED on TBI and SERDES devices.
   9894  */
   9895 static void
   9896 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9897 {
   9898 
   9899 	if (sc->sc_tbi_linkup)
   9900 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9901 	else
   9902 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9903 
   9904 	/* 82540 or newer devices are active low */
   9905 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9906 
   9907 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9908 }
   9909 
   9910 /* GMII related */
   9911 
   9912 /*
   9913  * wm_gmii_reset:
   9914  *
   9915  *	Reset the PHY.
   9916  */
   9917 static void
   9918 wm_gmii_reset(struct wm_softc *sc)
   9919 {
   9920 	uint32_t reg;
   9921 	int rv;
   9922 
   9923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9924 		device_xname(sc->sc_dev), __func__));
   9925 
   9926 	rv = sc->phy.acquire(sc);
   9927 	if (rv != 0) {
   9928 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9929 		    __func__);
   9930 		return;
   9931 	}
   9932 
   9933 	switch (sc->sc_type) {
   9934 	case WM_T_82542_2_0:
   9935 	case WM_T_82542_2_1:
   9936 		/* null */
   9937 		break;
   9938 	case WM_T_82543:
   9939 		/*
   9940 		 * With 82543, we need to force speed and duplex on the MAC
   9941 		 * equal to what the PHY speed and duplex configuration is.
   9942 		 * In addition, we need to pulse the PHY's hardware reset
   9943 		 * pin to take it out of reset.
   9944 		 */
   9945 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9946 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9947 
   9948 		/* The PHY reset pin is active-low. */
   9949 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9950 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9951 		    CTRL_EXT_SWDPIN(4));
   9952 		reg |= CTRL_EXT_SWDPIO(4);
   9953 
   9954 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9955 		CSR_WRITE_FLUSH(sc);
   9956 		delay(10*1000);
   9957 
   9958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9959 		CSR_WRITE_FLUSH(sc);
   9960 		delay(150);
   9961 #if 0
   9962 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9963 #endif
   9964 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9965 		break;
   9966 	case WM_T_82544:	/* Reset 10000us */
   9967 	case WM_T_82540:
   9968 	case WM_T_82545:
   9969 	case WM_T_82545_3:
   9970 	case WM_T_82546:
   9971 	case WM_T_82546_3:
   9972 	case WM_T_82541:
   9973 	case WM_T_82541_2:
   9974 	case WM_T_82547:
   9975 	case WM_T_82547_2:
   9976 	case WM_T_82571:	/* Reset 100us */
   9977 	case WM_T_82572:
   9978 	case WM_T_82573:
   9979 	case WM_T_82574:
   9980 	case WM_T_82575:
   9981 	case WM_T_82576:
   9982 	case WM_T_82580:
   9983 	case WM_T_I350:
   9984 	case WM_T_I354:
   9985 	case WM_T_I210:
   9986 	case WM_T_I211:
   9987 	case WM_T_82583:
   9988 	case WM_T_80003:
   9989 		/* Generic reset */
   9990 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9991 		CSR_WRITE_FLUSH(sc);
   9992 		delay(20000);
   9993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9994 		CSR_WRITE_FLUSH(sc);
   9995 		delay(20000);
   9996 
   9997 		if ((sc->sc_type == WM_T_82541)
   9998 		    || (sc->sc_type == WM_T_82541_2)
   9999 		    || (sc->sc_type == WM_T_82547)
   10000 		    || (sc->sc_type == WM_T_82547_2)) {
  10001 			/* Workarounds for IGP are done in igp_reset() */
   10002 			/* XXX add code to set LED after phy reset */
   10003 		}
   10004 		break;
   10005 	case WM_T_ICH8:
   10006 	case WM_T_ICH9:
   10007 	case WM_T_ICH10:
   10008 	case WM_T_PCH:
   10009 	case WM_T_PCH2:
   10010 	case WM_T_PCH_LPT:
   10011 	case WM_T_PCH_SPT:
   10012 	case WM_T_PCH_CNP:
   10013 		/* Generic reset */
   10014 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10015 		CSR_WRITE_FLUSH(sc);
   10016 		delay(100);
   10017 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10018 		CSR_WRITE_FLUSH(sc);
   10019 		delay(150);
   10020 		break;
   10021 	default:
   10022 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10023 		    __func__);
   10024 		break;
   10025 	}
   10026 
   10027 	sc->phy.release(sc);
   10028 
   10029 	/* get_cfg_done */
   10030 	wm_get_cfg_done(sc);
   10031 
   10032 	/* Extra setup */
   10033 	switch (sc->sc_type) {
   10034 	case WM_T_82542_2_0:
   10035 	case WM_T_82542_2_1:
   10036 	case WM_T_82543:
   10037 	case WM_T_82544:
   10038 	case WM_T_82540:
   10039 	case WM_T_82545:
   10040 	case WM_T_82545_3:
   10041 	case WM_T_82546:
   10042 	case WM_T_82546_3:
   10043 	case WM_T_82541_2:
   10044 	case WM_T_82547_2:
   10045 	case WM_T_82571:
   10046 	case WM_T_82572:
   10047 	case WM_T_82573:
   10048 	case WM_T_82574:
   10049 	case WM_T_82583:
   10050 	case WM_T_82575:
   10051 	case WM_T_82576:
   10052 	case WM_T_82580:
   10053 	case WM_T_I350:
   10054 	case WM_T_I354:
   10055 	case WM_T_I210:
   10056 	case WM_T_I211:
   10057 	case WM_T_80003:
   10058 		/* Null */
   10059 		break;
   10060 	case WM_T_82541:
   10061 	case WM_T_82547:
  10062 		/* XXX Actively configure the LED after PHY reset */
   10063 		break;
   10064 	case WM_T_ICH8:
   10065 	case WM_T_ICH9:
   10066 	case WM_T_ICH10:
   10067 	case WM_T_PCH:
   10068 	case WM_T_PCH2:
   10069 	case WM_T_PCH_LPT:
   10070 	case WM_T_PCH_SPT:
   10071 	case WM_T_PCH_CNP:
   10072 		wm_phy_post_reset(sc);
   10073 		break;
   10074 	default:
   10075 		panic("%s: unknown type\n", __func__);
   10076 		break;
   10077 	}
   10078 }
   10079 
   10080 /*
   10081  * Setup sc_phytype and mii_{read|write}reg.
   10082  *
  10083  *  To identify the PHY type, the correct read/write functions must be
  10084  * selected first, and selecting them requires the PCI ID or MAC type,
  10085  * since the PHY registers cannot be accessed yet.
  10086  *
  10087  *  On the first call of this function, the PHY ID is not yet known, so
  10088  * the PCI ID or MAC type is checked.  The list of PCI IDs may not be
  10089  * complete, so the result might be incorrect.
  10090  *
  10091  *  On the second call, the PHY OUI and model are used to identify the
  10092  * PHY type.  The result may still be imperfect if an entry is missing
  10093  * from the comparison table, but it is more reliable than the first call.
  10094  *
  10095  *  If the newly detected result differs from the previous assumption,
  10096  * a diagnostic message is printed.
   10097  */
   10098 static void
   10099 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10100     uint16_t phy_model)
   10101 {
   10102 	device_t dev = sc->sc_dev;
   10103 	struct mii_data *mii = &sc->sc_mii;
   10104 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10105 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10106 	mii_readreg_t new_readreg;
   10107 	mii_writereg_t new_writereg;
   10108 	bool dodiag = true;
   10109 
   10110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10111 		device_xname(sc->sc_dev), __func__));
   10112 
   10113 	/*
  10114 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
  10115 	 * incorrect, so don't print diagnostic output on the second call.
   10116 	 */
   10117 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10118 		dodiag = false;
   10119 
   10120 	if (mii->mii_readreg == NULL) {
   10121 		/*
   10122 		 *  This is the first call of this function. For ICH and PCH
   10123 		 * variants, it's difficult to determine the PHY access method
   10124 		 * by sc_type, so use the PCI product ID for some devices.
   10125 		 */
   10126 
   10127 		switch (sc->sc_pcidevid) {
   10128 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10129 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10130 			/* 82577 */
   10131 			new_phytype = WMPHY_82577;
   10132 			break;
   10133 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10134 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10135 			/* 82578 */
   10136 			new_phytype = WMPHY_82578;
   10137 			break;
   10138 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10139 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10140 			/* 82579 */
   10141 			new_phytype = WMPHY_82579;
   10142 			break;
   10143 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10144 		case PCI_PRODUCT_INTEL_82801I_BM:
   10145 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10146 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10147 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10148 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10149 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10150 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10151 			/* ICH8, 9, 10 with 82567 */
   10152 			new_phytype = WMPHY_BM;
   10153 			break;
   10154 		default:
   10155 			break;
   10156 		}
   10157 	} else {
   10158 		/* It's not the first call. Use PHY OUI and model */
   10159 		switch (phy_oui) {
   10160 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10161 			switch (phy_model) {
   10162 			case 0x0004: /* XXX */
   10163 				new_phytype = WMPHY_82578;
   10164 				break;
   10165 			default:
   10166 				break;
   10167 			}
   10168 			break;
   10169 		case MII_OUI_xxMARVELL:
   10170 			switch (phy_model) {
   10171 			case MII_MODEL_xxMARVELL_I210:
   10172 				new_phytype = WMPHY_I210;
   10173 				break;
   10174 			case MII_MODEL_xxMARVELL_E1011:
   10175 			case MII_MODEL_xxMARVELL_E1000_3:
   10176 			case MII_MODEL_xxMARVELL_E1000_5:
   10177 			case MII_MODEL_xxMARVELL_E1112:
   10178 				new_phytype = WMPHY_M88;
   10179 				break;
   10180 			case MII_MODEL_xxMARVELL_E1149:
   10181 				new_phytype = WMPHY_BM;
   10182 				break;
   10183 			case MII_MODEL_xxMARVELL_E1111:
   10184 			case MII_MODEL_xxMARVELL_I347:
   10185 			case MII_MODEL_xxMARVELL_E1512:
   10186 			case MII_MODEL_xxMARVELL_E1340M:
   10187 			case MII_MODEL_xxMARVELL_E1543:
   10188 				new_phytype = WMPHY_M88;
   10189 				break;
   10190 			case MII_MODEL_xxMARVELL_I82563:
   10191 				new_phytype = WMPHY_GG82563;
   10192 				break;
   10193 			default:
   10194 				break;
   10195 			}
   10196 			break;
   10197 		case MII_OUI_INTEL:
   10198 			switch (phy_model) {
   10199 			case MII_MODEL_INTEL_I82577:
   10200 				new_phytype = WMPHY_82577;
   10201 				break;
   10202 			case MII_MODEL_INTEL_I82579:
   10203 				new_phytype = WMPHY_82579;
   10204 				break;
   10205 			case MII_MODEL_INTEL_I217:
   10206 				new_phytype = WMPHY_I217;
   10207 				break;
   10208 			case MII_MODEL_INTEL_I82580:
   10209 			case MII_MODEL_INTEL_I350:
   10210 				new_phytype = WMPHY_82580;
   10211 				break;
   10212 			default:
   10213 				break;
   10214 			}
   10215 			break;
   10216 		case MII_OUI_yyINTEL:
   10217 			switch (phy_model) {
   10218 			case MII_MODEL_yyINTEL_I82562G:
   10219 			case MII_MODEL_yyINTEL_I82562EM:
   10220 			case MII_MODEL_yyINTEL_I82562ET:
   10221 				new_phytype = WMPHY_IFE;
   10222 				break;
   10223 			case MII_MODEL_yyINTEL_IGP01E1000:
   10224 				new_phytype = WMPHY_IGP;
   10225 				break;
   10226 			case MII_MODEL_yyINTEL_I82566:
   10227 				new_phytype = WMPHY_IGP_3;
   10228 				break;
   10229 			default:
   10230 				break;
   10231 			}
   10232 			break;
   10233 		default:
   10234 			break;
   10235 		}
   10236 
   10237 		if (dodiag) {
   10238 			if (new_phytype == WMPHY_UNKNOWN)
   10239 				aprint_verbose_dev(dev,
   10240 				    "%s: Unknown PHY model. OUI=%06x, "
   10241 				    "model=%04x\n", __func__, phy_oui,
   10242 				    phy_model);
   10243 
   10244 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10245 			    && (sc->sc_phytype != new_phytype)) {
   10246 				aprint_error_dev(dev, "Previously assumed PHY "
  10247 				    "type(%u) was incorrect. PHY type from "
  10248 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10249 			}
   10250 		}
   10251 	}
   10252 
   10253 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
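        	/*
        	 * The else-if chain below relies on the WM_T_* constants being
        	 * ordered by generation: each ">=" test must check a newer
        	 * family than the tests that follow it.
        	 */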
   10254 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10255 		/* SGMII */
   10256 		new_readreg = wm_sgmii_readreg;
   10257 		new_writereg = wm_sgmii_writereg;
   10258 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10259 		/* BM2 (phyaddr == 1) */
   10260 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10261 		    && (new_phytype != WMPHY_BM)
   10262 		    && (new_phytype != WMPHY_UNKNOWN))
   10263 			doubt_phytype = new_phytype;
   10264 		new_phytype = WMPHY_BM;
   10265 		new_readreg = wm_gmii_bm_readreg;
   10266 		new_writereg = wm_gmii_bm_writereg;
   10267 	} else if (sc->sc_type >= WM_T_PCH) {
   10268 		/* All PCH* use _hv_ */
   10269 		new_readreg = wm_gmii_hv_readreg;
   10270 		new_writereg = wm_gmii_hv_writereg;
   10271 	} else if (sc->sc_type >= WM_T_ICH8) {
   10272 		/* non-82567 ICH8, 9 and 10 */
   10273 		new_readreg = wm_gmii_i82544_readreg;
   10274 		new_writereg = wm_gmii_i82544_writereg;
   10275 	} else if (sc->sc_type >= WM_T_80003) {
   10276 		/* 80003 */
   10277 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10278 		    && (new_phytype != WMPHY_GG82563)
   10279 		    && (new_phytype != WMPHY_UNKNOWN))
   10280 			doubt_phytype = new_phytype;
   10281 		new_phytype = WMPHY_GG82563;
   10282 		new_readreg = wm_gmii_i80003_readreg;
   10283 		new_writereg = wm_gmii_i80003_writereg;
   10284 	} else if (sc->sc_type >= WM_T_I210) {
   10285 		/* I210 and I211 */
   10286 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10287 		    && (new_phytype != WMPHY_I210)
   10288 		    && (new_phytype != WMPHY_UNKNOWN))
   10289 			doubt_phytype = new_phytype;
   10290 		new_phytype = WMPHY_I210;
   10291 		new_readreg = wm_gmii_gs40g_readreg;
   10292 		new_writereg = wm_gmii_gs40g_writereg;
   10293 	} else if (sc->sc_type >= WM_T_82580) {
   10294 		/* 82580, I350 and I354 */
   10295 		new_readreg = wm_gmii_82580_readreg;
   10296 		new_writereg = wm_gmii_82580_writereg;
   10297 	} else if (sc->sc_type >= WM_T_82544) {
  10298 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10299 		new_readreg = wm_gmii_i82544_readreg;
   10300 		new_writereg = wm_gmii_i82544_writereg;
   10301 	} else {
   10302 		new_readreg = wm_gmii_i82543_readreg;
   10303 		new_writereg = wm_gmii_i82543_writereg;
   10304 	}
   10305 
   10306 	if (new_phytype == WMPHY_BM) {
   10307 		/* All BM use _bm_ */
   10308 		new_readreg = wm_gmii_bm_readreg;
   10309 		new_writereg = wm_gmii_bm_writereg;
   10310 	}
   10311 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10312 		/* All PCH* use _hv_ */
   10313 		new_readreg = wm_gmii_hv_readreg;
   10314 		new_writereg = wm_gmii_hv_writereg;
   10315 	}
   10316 
   10317 	/* Diag output */
   10318 	if (dodiag) {
   10319 		if (doubt_phytype != WMPHY_UNKNOWN)
   10320 			aprint_error_dev(dev, "Assumed new PHY type was "
   10321 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10322 			    new_phytype);
   10323 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10324 		    && (sc->sc_phytype != new_phytype))
  10325 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
  10326 			    "was incorrect. New PHY type = %u\n",
   10327 			    sc->sc_phytype, new_phytype);
   10328 
   10329 		if ((mii->mii_readreg != NULL) &&
   10330 		    (new_phytype == WMPHY_UNKNOWN))
   10331 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10332 
   10333 		if ((mii->mii_readreg != NULL) &&
   10334 		    (mii->mii_readreg != new_readreg))
   10335 			aprint_error_dev(dev, "Previously assumed PHY "
   10336 			    "read/write function was incorrect.\n");
   10337 	}
   10338 
   10339 	/* Update now */
   10340 	sc->sc_phytype = new_phytype;
   10341 	mii->mii_readreg = new_readreg;
   10342 	mii->mii_writereg = new_writereg;
   10343 	if (new_readreg == wm_gmii_hv_readreg) {
   10344 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10345 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10346 	} else if (new_readreg == wm_sgmii_readreg) {
   10347 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10348 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10349 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10350 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10351 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10352 	}
   10353 }
   10354 
   10355 /*
   10356  * wm_get_phy_id_82575:
   10357  *
  10358  * Return the PHY ID, or -1 on failure.
   10359  */
   10360 static int
   10361 wm_get_phy_id_82575(struct wm_softc *sc)
   10362 {
   10363 	uint32_t reg;
   10364 	int phyid = -1;
   10365 
   10366 	/* XXX */
   10367 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10368 		return -1;
   10369 
   10370 	if (wm_sgmii_uses_mdio(sc)) {
   10371 		switch (sc->sc_type) {
   10372 		case WM_T_82575:
   10373 		case WM_T_82576:
   10374 			reg = CSR_READ(sc, WMREG_MDIC);
   10375 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10376 			break;
   10377 		case WM_T_82580:
   10378 		case WM_T_I350:
   10379 		case WM_T_I354:
   10380 		case WM_T_I210:
   10381 		case WM_T_I211:
   10382 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10383 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10384 			break;
   10385 		default:
   10386 			return -1;
   10387 		}
   10388 	}
   10389 
   10390 	return phyid;
   10391 }
   10392 
   10393 
   10394 /*
   10395  * wm_gmii_mediainit:
   10396  *
   10397  *	Initialize media for use on 1000BASE-T devices.
   10398  */
   10399 static void
   10400 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10401 {
   10402 	device_t dev = sc->sc_dev;
   10403 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10404 	struct mii_data *mii = &sc->sc_mii;
   10405 
   10406 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10407 		device_xname(sc->sc_dev), __func__));
   10408 
   10409 	/* We have GMII. */
   10410 	sc->sc_flags |= WM_F_HAS_MII;
   10411 
   10412 	if (sc->sc_type == WM_T_80003)
   10413 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10414 	else
   10415 		sc->sc_tipg = TIPG_1000T_DFLT;
   10416 
   10417 	/*
   10418 	 * Let the chip set speed/duplex on its own based on
   10419 	 * signals from the PHY.
   10420 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10421 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10422 	 */
   10423 	sc->sc_ctrl |= CTRL_SLU;
   10424 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10425 
   10426 	/* Initialize our media structures and probe the GMII. */
   10427 	mii->mii_ifp = ifp;
   10428 
   10429 	mii->mii_statchg = wm_gmii_statchg;
   10430 
   10431 	/* get PHY control from SMBus to PCIe */
   10432 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10433 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10434 	    || (sc->sc_type == WM_T_PCH_CNP))
   10435 		wm_init_phy_workarounds_pchlan(sc);
   10436 
   10437 	wm_gmii_reset(sc);
   10438 
   10439 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10440 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10441 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10442 
   10443 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10444 	    || (sc->sc_type == WM_T_82580)
   10445 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10446 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10447 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10448 			/* Attach only one port */
   10449 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10450 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10451 		} else {
   10452 			int i, id;
   10453 			uint32_t ctrl_ext;
   10454 
   10455 			id = wm_get_phy_id_82575(sc);
   10456 			if (id != -1) {
   10457 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10458 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10459 			}
   10460 			if ((id == -1)
   10461 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
  10462 				/* Power on the SGMII PHY if it is disabled */
   10463 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10464 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10465 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10466 				CSR_WRITE_FLUSH(sc);
   10467 				delay(300*1000); /* XXX too long */
   10468 
   10469 				/*
  10470 				 * Scan PHY addresses 1 to 7.
  10471 				 *
  10472 				 * I2C access can fail with the I2C register's
  10473 				 * ERROR bit set, so suppress error messages
  10474 				 * while scanning.
   10475 				 */
   10476 				sc->phy.no_errprint = true;
   10477 				for (i = 1; i < 8; i++)
   10478 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10479 					    0xffffffff, i, MII_OFFSET_ANY,
   10480 					    MIIF_DOPAUSE);
   10481 				sc->phy.no_errprint = false;
   10482 
   10483 				/* Restore previous sfp cage power state */
   10484 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10485 			}
   10486 		}
   10487 	} else
   10488 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10489 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10490 
   10491 	/*
  10492 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
  10493 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10494 	 */
   10495 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10496 		|| (sc->sc_type == WM_T_PCH_SPT)
   10497 		|| (sc->sc_type == WM_T_PCH_CNP))
   10498 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10499 		wm_set_mdio_slow_mode_hv(sc);
   10500 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10501 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10502 	}
   10503 
   10504 	/*
   10505 	 * (For ICH8 variants)
   10506 	 * If PHY detection failed, use BM's r/w function and retry.
   10507 	 */
   10508 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10509 		/* if failed, retry with *_bm_* */
   10510 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10511 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10512 		    sc->sc_phytype);
   10513 		sc->sc_phytype = WMPHY_BM;
   10514 		mii->mii_readreg = wm_gmii_bm_readreg;
   10515 		mii->mii_writereg = wm_gmii_bm_writereg;
   10516 
   10517 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10518 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10519 	}
   10520 
   10521 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
  10522 		/* No PHY was found */
   10523 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10524 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10525 		sc->sc_phytype = WMPHY_NONE;
   10526 	} else {
   10527 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10528 
   10529 		/*
  10530 		 * A PHY was found. Re-check the PHY type with a second call
  10531 		 * to wm_gmii_setup_phytype().
   10532 		 */
   10533 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10534 		    child->mii_mpd_model);
   10535 
   10536 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10537 	}
   10538 }
   10539 
   10540 /*
   10541  * wm_gmii_mediachange:	[ifmedia interface function]
   10542  *
   10543  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10544  */
   10545 static int
   10546 wm_gmii_mediachange(struct ifnet *ifp)
   10547 {
   10548 	struct wm_softc *sc = ifp->if_softc;
   10549 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10550 	uint32_t reg;
   10551 	int rc;
   10552 
   10553 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10554 		device_xname(sc->sc_dev), __func__));
   10555 	if ((ifp->if_flags & IFF_UP) == 0)
   10556 		return 0;
   10557 
   10558 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10559 	if ((sc->sc_type == WM_T_82580)
   10560 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10561 	    || (sc->sc_type == WM_T_I211)) {
   10562 		reg = CSR_READ(sc, WMREG_PHPM);
   10563 		reg &= ~PHPM_GO_LINK_D;
   10564 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10565 	}
   10566 
   10567 	/* Disable D0 LPLU. */
   10568 	wm_lplu_d0_disable(sc);
   10569 
   10570 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10571 	sc->sc_ctrl |= CTRL_SLU;
   10572 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10573 	    || (sc->sc_type > WM_T_82543)) {
   10574 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10575 	} else {
   10576 		sc->sc_ctrl &= ~CTRL_ASDE;
   10577 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10578 		if (ife->ifm_media & IFM_FDX)
   10579 			sc->sc_ctrl |= CTRL_FD;
   10580 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10581 		case IFM_10_T:
   10582 			sc->sc_ctrl |= CTRL_SPEED_10;
   10583 			break;
   10584 		case IFM_100_TX:
   10585 			sc->sc_ctrl |= CTRL_SPEED_100;
   10586 			break;
   10587 		case IFM_1000_T:
   10588 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10589 			break;
   10590 		case IFM_NONE:
   10591 			/* There is no specific setting for IFM_NONE */
   10592 			break;
   10593 		default:
   10594 			panic("wm_gmii_mediachange: bad media 0x%x",
   10595 			    ife->ifm_media);
   10596 		}
   10597 	}
   10598 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10599 	CSR_WRITE_FLUSH(sc);
   10600 
   10601 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10602 		wm_serdes_mediachange(ifp);
   10603 
   10604 	if (sc->sc_type <= WM_T_82543)
   10605 		wm_gmii_reset(sc);
   10606 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10607 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
  10608 		/* Allow time for the SFP cage to power up the PHY */
   10609 		delay(300 * 1000);
   10610 		wm_gmii_reset(sc);
   10611 	}
   10612 
   10613 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10614 		return 0;
   10615 	return rc;
   10616 }
   10617 
   10618 /*
   10619  * wm_gmii_mediastatus:	[ifmedia interface function]
   10620  *
   10621  *	Get the current interface media status on a 1000BASE-T device.
   10622  */
   10623 static void
   10624 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10625 {
   10626 	struct wm_softc *sc = ifp->if_softc;
   10627 
   10628 	ether_mediastatus(ifp, ifmr);
   10629 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10630 	    | sc->sc_flowflags;
   10631 }
   10632 
   10633 #define	MDI_IO		CTRL_SWDPIN(2)
   10634 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10635 #define	MDI_CLK		CTRL_SWDPIN(3)
   10636 
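        /*
         * Bit-banged MII access for the 82543: the MDIO lines are wired to
         * software-controlled pins, so management frames are shifted out by
         * driving MDI_IO and pulsing MDI_CLK with ~10us hold times.  The
         * frames follow IEEE 802.3 clause 22: a preamble of 32 ones, then
         * start, opcode, PHY address and register address bits, a
         * turnaround, and 16 data bits.
         */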
   10637 static void
   10638 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10639 {
   10640 	uint32_t i, v;
   10641 
   10642 	v = CSR_READ(sc, WMREG_CTRL);
   10643 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10644 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10645 
   10646 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10647 		if (data & i)
   10648 			v |= MDI_IO;
   10649 		else
   10650 			v &= ~MDI_IO;
   10651 		CSR_WRITE(sc, WMREG_CTRL, v);
   10652 		CSR_WRITE_FLUSH(sc);
   10653 		delay(10);
   10654 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10655 		CSR_WRITE_FLUSH(sc);
   10656 		delay(10);
   10657 		CSR_WRITE(sc, WMREG_CTRL, v);
   10658 		CSR_WRITE_FLUSH(sc);
   10659 		delay(10);
   10660 	}
   10661 }
   10662 
   10663 static uint16_t
   10664 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10665 {
   10666 	uint32_t v, i;
   10667 	uint16_t data = 0;
   10668 
   10669 	v = CSR_READ(sc, WMREG_CTRL);
   10670 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10671 	v |= CTRL_SWDPIO(3);
   10672 
   10673 	CSR_WRITE(sc, WMREG_CTRL, v);
   10674 	CSR_WRITE_FLUSH(sc);
   10675 	delay(10);
   10676 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10677 	CSR_WRITE_FLUSH(sc);
   10678 	delay(10);
   10679 	CSR_WRITE(sc, WMREG_CTRL, v);
   10680 	CSR_WRITE_FLUSH(sc);
   10681 	delay(10);
   10682 
   10683 	for (i = 0; i < 16; i++) {
   10684 		data <<= 1;
   10685 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10686 		CSR_WRITE_FLUSH(sc);
   10687 		delay(10);
   10688 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10689 			data |= 1;
   10690 		CSR_WRITE(sc, WMREG_CTRL, v);
   10691 		CSR_WRITE_FLUSH(sc);
   10692 		delay(10);
   10693 	}
   10694 
   10695 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10696 	CSR_WRITE_FLUSH(sc);
   10697 	delay(10);
   10698 	CSR_WRITE(sc, WMREG_CTRL, v);
   10699 	CSR_WRITE_FLUSH(sc);
   10700 	delay(10);
   10701 
   10702 	return data;
   10703 }
   10704 
   10705 #undef MDI_IO
   10706 #undef MDI_DIR
   10707 #undef MDI_CLK
   10708 
   10709 /*
   10710  * wm_gmii_i82543_readreg:	[mii interface function]
   10711  *
   10712  *	Read a PHY register on the GMII (i82543 version).
   10713  */
   10714 static int
   10715 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10716 {
   10717 	struct wm_softc *sc = device_private(dev);
   10718 
   10719 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10720 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10721 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10722 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10723 
   10724 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10725 		device_xname(dev), phy, reg, *val));
   10726 
   10727 	return 0;
   10728 }
   10729 
   10730 /*
   10731  * wm_gmii_i82543_writereg:	[mii interface function]
   10732  *
   10733  *	Write a PHY register on the GMII (i82543 version).
   10734  */
   10735 static int
   10736 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10737 {
   10738 	struct wm_softc *sc = device_private(dev);
   10739 
   10740 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10741 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10742 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10743 	    (MII_COMMAND_START << 30), 32);
   10744 
   10745 	return 0;
   10746 }
   10747 
   10748 /*
   10749  * wm_gmii_mdic_readreg:	[mii interface function]
   10750  *
   10751  *	Read a PHY register on the GMII.
   10752  */
   10753 static int
   10754 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10755 {
   10756 	struct wm_softc *sc = device_private(dev);
   10757 	uint32_t mdic = 0;
   10758 	int i;
   10759 
   10760 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10761 	    && (reg > MII_ADDRMASK)) {
   10762 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10763 		    __func__, sc->sc_phytype, reg);
   10764 		reg &= MII_ADDRMASK;
   10765 	}
   10766 
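        	/* Start the read and poll MDIC until the hardware sets READY. */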
   10767 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10768 	    MDIC_REGADD(reg));
   10769 
   10770 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10771 		delay(50);
   10772 		mdic = CSR_READ(sc, WMREG_MDIC);
   10773 		if (mdic & MDIC_READY)
   10774 			break;
   10775 	}
   10776 
   10777 	if ((mdic & MDIC_READY) == 0) {
   10778 		DPRINTF(WM_DEBUG_GMII,
   10779 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10780 			device_xname(dev), phy, reg));
   10781 		return ETIMEDOUT;
   10782 	} else if (mdic & MDIC_E) {
   10783 		/* This is normal if no PHY is present. */
   10784 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10785 			device_xname(sc->sc_dev), phy, reg));
   10786 		return -1;
   10787 	} else
   10788 		*val = MDIC_DATA(mdic);
   10789 
   10790 	/*
   10791 	 * Allow some time after each MDIC transaction to avoid
   10792 	 * reading duplicate data in the next MDIC transaction.
   10793 	 */
   10794 	if (sc->sc_type == WM_T_PCH2)
   10795 		delay(100);
   10796 
   10797 	return 0;
   10798 }
   10799 
   10800 /*
   10801  * wm_gmii_mdic_writereg:	[mii interface function]
   10802  *
   10803  *	Write a PHY register on the GMII.
   10804  */
   10805 static int
   10806 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10807 {
   10808 	struct wm_softc *sc = device_private(dev);
   10809 	uint32_t mdic = 0;
   10810 	int i;
   10811 
   10812 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10813 	    && (reg > MII_ADDRMASK)) {
   10814 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10815 		    __func__, sc->sc_phytype, reg);
   10816 		reg &= MII_ADDRMASK;
   10817 	}
   10818 
   10819 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10820 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10821 
   10822 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10823 		delay(50);
   10824 		mdic = CSR_READ(sc, WMREG_MDIC);
   10825 		if (mdic & MDIC_READY)
   10826 			break;
   10827 	}
   10828 
   10829 	if ((mdic & MDIC_READY) == 0) {
   10830 		DPRINTF(WM_DEBUG_GMII,
   10831 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10832 			device_xname(dev), phy, reg));
   10833 		return ETIMEDOUT;
   10834 	} else if (mdic & MDIC_E) {
   10835 		DPRINTF(WM_DEBUG_GMII,
   10836 		    ("%s: MDIC write error: phy %d reg %d\n",
   10837 			device_xname(dev), phy, reg));
   10838 		return -1;
   10839 	}
   10840 
   10841 	/*
   10842 	 * Allow some time after each MDIC transaction to avoid
   10843 	 * reading duplicate data in the next MDIC transaction.
   10844 	 */
   10845 	if (sc->sc_type == WM_T_PCH2)
   10846 		delay(100);
   10847 
   10848 	return 0;
   10849 }
   10850 
   10851 /*
   10852  * wm_gmii_i82544_readreg:	[mii interface function]
   10853  *
   10854  *	Read a PHY register on the GMII.
   10855  */
   10856 static int
   10857 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10858 {
   10859 	struct wm_softc *sc = device_private(dev);
   10860 	int rv;
   10861 
   10862 	if (sc->phy.acquire(sc)) {
   10863 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10864 		return -1;
   10865 	}
   10866 
   10867 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10868 
   10869 	sc->phy.release(sc);
   10870 
   10871 	return rv;
   10872 }
   10873 
   10874 static int
   10875 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10876 {
   10877 	struct wm_softc *sc = device_private(dev);
   10878 	int rv;
   10879 
   10880 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10881 		switch (sc->sc_phytype) {
   10882 		case WMPHY_IGP:
   10883 		case WMPHY_IGP_2:
   10884 		case WMPHY_IGP_3:
   10885 			rv = wm_gmii_mdic_writereg(dev, phy,
   10886 			    MII_IGPHY_PAGE_SELECT, reg);
   10887 			if (rv != 0)
   10888 				return rv;
   10889 			break;
   10890 		default:
   10891 #ifdef WM_DEBUG
   10892 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10893 			    __func__, sc->sc_phytype, reg);
   10894 #endif
   10895 			break;
   10896 		}
   10897 	}
   10898 
   10899 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10900 }
   10901 
   10902 /*
   10903  * wm_gmii_i82544_writereg:	[mii interface function]
   10904  *
   10905  *	Write a PHY register on the GMII.
   10906  */
   10907 static int
   10908 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10909 {
   10910 	struct wm_softc *sc = device_private(dev);
   10911 	int rv;
   10912 
   10913 	if (sc->phy.acquire(sc)) {
   10914 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10915 		return -1;
   10916 	}
   10917 
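        	/*
        	 * Pass the register unmasked so the locked function can see
        	 * the page bits and select the page.
        	 */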
  10918 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10919 	sc->phy.release(sc);
   10920 
   10921 	return rv;
   10922 }
   10923 
   10924 static int
   10925 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10926 {
   10927 	struct wm_softc *sc = device_private(dev);
   10928 	int rv;
   10929 
   10930 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10931 		switch (sc->sc_phytype) {
   10932 		case WMPHY_IGP:
   10933 		case WMPHY_IGP_2:
   10934 		case WMPHY_IGP_3:
   10935 			rv = wm_gmii_mdic_writereg(dev, phy,
   10936 			    MII_IGPHY_PAGE_SELECT, reg);
   10937 			if (rv != 0)
   10938 				return rv;
   10939 			break;
   10940 		default:
   10941 #ifdef WM_DEBUG
  10942 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   10943 			    __func__, sc->sc_phytype, reg);
   10944 #endif
   10945 			break;
   10946 		}
   10947 	}
   10948 
   10949 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10950 }
   10951 
   10952 /*
   10953  * wm_gmii_i80003_readreg:	[mii interface function]
   10954  *
  10955  *	Read a PHY register on the Kumeran bus (80003).
  10956  * This could be handled by the PHY layer if we didn't have to lock the
  10957  * resource ...
   10958  */
   10959 static int
   10960 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10961 {
   10962 	struct wm_softc *sc = device_private(dev);
   10963 	int page_select;
   10964 	uint16_t temp, temp2;
   10965 	int rv = 0;
   10966 
   10967 	if (phy != 1) /* Only one PHY on kumeran bus */
   10968 		return -1;
   10969 
   10970 	if (sc->phy.acquire(sc)) {
   10971 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10972 		return -1;
   10973 	}
   10974 
   10975 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10976 		page_select = GG82563_PHY_PAGE_SELECT;
   10977 	else {
   10978 		/*
   10979 		 * Use Alternative Page Select register to access registers
   10980 		 * 30 and 31.
   10981 		 */
   10982 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10983 	}
   10984 	temp = reg >> GG82563_PAGE_SHIFT;
   10985 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10986 		goto out;
   10987 
   10988 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10989 		/*
  10990 		 * Wait an extra 200us to work around a bug in the ready bit
  10991 		 * of the MDIC register.
   10992 		 */
   10993 		delay(200);
   10994 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10995 		if ((rv != 0) || (temp2 != temp)) {
   10996 			device_printf(dev, "%s failed\n", __func__);
   10997 			rv = -1;
   10998 			goto out;
   10999 		}
   11000 		delay(200);
   11001 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11002 		delay(200);
   11003 	} else
   11004 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11005 
   11006 out:
   11007 	sc->phy.release(sc);
   11008 	return rv;
   11009 }
   11010 
   11011 /*
   11012  * wm_gmii_i80003_writereg:	[mii interface function]
   11013  *
  11014  *	Write a PHY register on the Kumeran bus (80003).
  11015  * This could be handled by the PHY layer if we didn't have to lock the
  11016  * resource ...
   11017  */
   11018 static int
   11019 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11020 {
   11021 	struct wm_softc *sc = device_private(dev);
   11022 	int page_select, rv;
   11023 	uint16_t temp, temp2;
   11024 
   11025 	if (phy != 1) /* Only one PHY on kumeran bus */
   11026 		return -1;
   11027 
   11028 	if (sc->phy.acquire(sc)) {
   11029 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11030 		return -1;
   11031 	}
   11032 
   11033 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11034 		page_select = GG82563_PHY_PAGE_SELECT;
   11035 	else {
   11036 		/*
   11037 		 * Use Alternative Page Select register to access registers
   11038 		 * 30 and 31.
   11039 		 */
   11040 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11041 	}
   11042 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11043 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11044 		goto out;
   11045 
   11046 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11047 		/*
  11048 		 * Wait an extra 200us to work around a bug in the ready bit
  11049 		 * of the MDIC register.
   11050 		 */
   11051 		delay(200);
   11052 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11053 		if ((rv != 0) || (temp2 != temp)) {
   11054 			device_printf(dev, "%s failed\n", __func__);
   11055 			rv = -1;
   11056 			goto out;
   11057 		}
   11058 		delay(200);
   11059 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11060 		delay(200);
   11061 	} else
   11062 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11063 
   11064 out:
   11065 	sc->phy.release(sc);
   11066 	return rv;
   11067 }
   11068 
   11069 /*
   11070  * wm_gmii_bm_readreg:	[mii interface function]
   11071  *
  11072  *	Read a PHY register on a BM PHY.
  11073  * This could be handled by the PHY layer if we didn't have to lock the
  11074  * resource ...
   11075  */
   11076 static int
   11077 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11078 {
   11079 	struct wm_softc *sc = device_private(dev);
   11080 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11081 	int rv;
   11082 
   11083 	if (sc->phy.acquire(sc)) {
   11084 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11085 		return -1;
   11086 	}
   11087 
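        	/*
        	 * On BM PHYs the page select, port control and wakeup registers
        	 * all live at PHY address 1 (see
        	 * wm_enable_phy_wakeup_reg_access_bm() below), so redirect
        	 * accesses to those registers there.
        	 */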
   11088 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11089 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11090 		    || (reg == 31)) ? 1 : phy;
   11091 	/* Page 800 works differently than the rest so it has its own func */
   11092 	if (page == BM_WUC_PAGE) {
   11093 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11094 		goto release;
   11095 	}
   11096 
   11097 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11098 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11099 		    && (sc->sc_type != WM_T_82583))
   11100 			rv = wm_gmii_mdic_writereg(dev, phy,
   11101 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11102 		else
   11103 			rv = wm_gmii_mdic_writereg(dev, phy,
   11104 			    BME1000_PHY_PAGE_SELECT, page);
   11105 		if (rv != 0)
   11106 			goto release;
   11107 	}
   11108 
   11109 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11110 
   11111 release:
   11112 	sc->phy.release(sc);
   11113 	return rv;
   11114 }
   11115 
   11116 /*
   11117  * wm_gmii_bm_writereg:	[mii interface function]
   11118  *
  11119  *	Write a PHY register on a BM PHY.
  11120  * This could be handled by the PHY layer if we didn't have to lock the
  11121  * resource ...
   11122  */
   11123 static int
   11124 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11125 {
   11126 	struct wm_softc *sc = device_private(dev);
   11127 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11128 	int rv;
   11129 
   11130 	if (sc->phy.acquire(sc)) {
   11131 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11132 		return -1;
   11133 	}
   11134 
   11135 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11136 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11137 		    || (reg == 31)) ? 1 : phy;
   11138 	/* Page 800 works differently than the rest so it has its own func */
   11139 	if (page == BM_WUC_PAGE) {
   11140 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11141 		goto release;
   11142 	}
   11143 
   11144 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11145 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11146 		    && (sc->sc_type != WM_T_82583))
   11147 			rv = wm_gmii_mdic_writereg(dev, phy,
   11148 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11149 		else
   11150 			rv = wm_gmii_mdic_writereg(dev, phy,
   11151 			    BME1000_PHY_PAGE_SELECT, page);
   11152 		if (rv != 0)
   11153 			goto release;
   11154 	}
   11155 
   11156 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11157 
   11158 release:
   11159 	sc->phy.release(sc);
   11160 	return rv;
   11161 }
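
/*
 * Note on the "reg" encoding used by the two BM accessors above: the
 * argument packs a page number and a register number, split again as
 * page = reg >> BME1000_PAGE_SHIFT and regnum = reg & MII_ADDRMASK.
 * A minimal sketch (the page/register values are placeholders chosen
 * only for illustration):
 *
 *	reg = (page << BME1000_PAGE_SHIFT) | (regnum & MII_ADDRMASK);
 *	wm_gmii_bm_writereg(dev, phy, reg, val);
 */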
   11162 
   11163 /*
   11164  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11165  *  @dev: pointer to the HW structure
   11166  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11167  *
   11168  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11169  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11170  */
   11171 static int
   11172 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11173 {
   11174 	uint16_t temp;
   11175 	int rv;
   11176 
   11177 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11178 		device_xname(dev), __func__));
   11179 
   11180 	if (!phy_regp)
   11181 		return -1;
   11182 
   11183 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11184 
   11185 	/* Select Port Control Registers page */
   11186 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11187 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11188 	if (rv != 0)
   11189 		return rv;
   11190 
   11191 	/* Read WUCE and save it */
   11192 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11193 	if (rv != 0)
   11194 		return rv;
   11195 
   11196 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11197 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11198 	 */
   11199 	temp = *phy_regp;
   11200 	temp |= BM_WUC_ENABLE_BIT;
   11201 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11202 
   11203 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11204 		return rv;
   11205 
   11206 	/* Select Host Wakeup Registers page - caller now able to write
   11207 	 * registers on the Wakeup registers page
   11208 	 */
   11209 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11210 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11211 }
   11212 
   11213 /*
   11214  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11215  *  @dev: pointer to the HW structure
   11216  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11217  *
   11218  *  Restore BM_WUC_ENABLE_REG to its original value.
   11219  *
   11220  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11221  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11222  *  caller.
   11223  */
   11224 static int
   11225 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11226 {
   11227 
   11228 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11229 		device_xname(dev), __func__));
   11230 
   11231 	if (!phy_regp)
   11232 		return -1;
   11233 
   11234 	/* Select Port Control Registers page */
   11235 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11236 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11237 
   11238 	/* Restore 769.17 to its original value */
   11239 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11240 
   11241 	return 0;
   11242 }
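
/*
 * "769.17" above uses the page.register notation of the procedure
 * comment below: BM_WUC_ENABLE_REG names page 769 (BM_PORT_CTRL_PAGE),
 * register 17, which BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() unpack
 * again in wm_access_phy_wakeup_reg_bm().
 */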
   11243 
   11244 /*
   11245  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11246  *  @sc: pointer to the HW structure
   11247  *  @offset: register offset to be read or written
   11248  *  @val: pointer to the data to read or write
   11249  *  @rd: determines if operation is read or write
   11250  *  @page_set: BM_WUC_PAGE already set and access enabled
   11251  *
   11252  *  Read the PHY register at offset and store the retrieved information in
   11253  *  data, or write data to PHY register at offset.  Note the procedure to
   11254  *  access the PHY wakeup registers is different than reading the other PHY
   11255  *  registers. It works as such:
   11256  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11257  *  2) Set page to 800 for host (801 if we were manageability)
   11258  *  3) Write the address using the address opcode (0x11)
   11259  *  4) Read or write the data using the data opcode (0x12)
   11260  *  5) Restore 769.17.2 to its original value
   11261  *
   11262  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11263  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11264  *
   11265  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11266  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11267  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11268  */
   11269 static int
   11270 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11271 	bool page_set)
   11272 {
   11273 	struct wm_softc *sc = device_private(dev);
   11274 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11275 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11276 	uint16_t wuce;
   11277 	int rv = 0;
   11278 
   11279 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11280 		device_xname(dev), __func__));
   11281 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11282 	if ((sc->sc_type == WM_T_PCH)
   11283 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11284 		device_printf(dev,
   11285 		    "Attempting to access page %d while gig enabled.\n", page);
   11286 	}
   11287 
   11288 	if (!page_set) {
   11289 		/* Enable access to PHY wakeup registers */
   11290 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11291 		if (rv != 0) {
   11292 			device_printf(dev,
   11293 			    "%s: Could not enable PHY wakeup reg access\n",
   11294 			    __func__);
   11295 			return rv;
   11296 		}
   11297 	}
   11298 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11299 		device_xname(sc->sc_dev), __func__, page, regnum));
   11300 
	/*
	 * Steps 3) and 4) of the procedure described in the function
	 * comment above: write the address, then read or write the data.
	 */
   11305 
   11306 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11307 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11308 	if (rv != 0)
   11309 		return rv;
   11310 
   11311 	if (rd) {
   11312 		/* Read the Wakeup register page value using opcode 0x12 */
   11313 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11314 	} else {
   11315 		/* Write the Wakeup register page value using opcode 0x12 */
   11316 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11317 	}
   11318 	if (rv != 0)
   11319 		return rv;
   11320 
   11321 	if (!page_set)
   11322 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11323 
   11324 	return rv;
   11325 }
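
/*
 * Putting the helpers together, a wakeup-register read with
 * page_set == false boils down to this MDIC sequence (a summary of the
 * three functions above, not additional driver code):
 *
 *	MII_IGPHY_PAGE_SELECT	<- BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT
 *	wuce			<- BM_WUC_ENABLE_REG
 *	BM_WUC_ENABLE_REG	<- (wuce | enable) & ~(ME WU | Host WU)
 *	MII_IGPHY_PAGE_SELECT	<- BM_WUC_PAGE << IGP3_PAGE_SHIFT
 *	BM_WUC_ADDRESS_OPCODE	<- regnum
 *	*val			<- BM_WUC_DATA_OPCODE
 *	MII_IGPHY_PAGE_SELECT	<- BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT
 *	BM_WUC_ENABLE_REG	<- wuce
 */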
   11326 
   11327 /*
   11328  * wm_gmii_hv_readreg:	[mii interface function]
   11329  *
 *	Read a PHY register on the HV (PCH family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11333  */
   11334 static int
   11335 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11336 {
   11337 	struct wm_softc *sc = device_private(dev);
   11338 	int rv;
   11339 
   11340 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11341 		device_xname(dev), __func__));
   11342 	if (sc->phy.acquire(sc)) {
   11343 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11344 		return -1;
   11345 	}
   11346 
   11347 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11348 	sc->phy.release(sc);
   11349 	return rv;
   11350 }
   11351 
   11352 static int
   11353 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11354 {
   11355 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11356 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11357 	int rv;
   11358 
   11359 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11360 
   11361 	/* Page 800 works differently than the rest so it has its own func */
   11362 	if (page == BM_WUC_PAGE)
   11363 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11364 
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and would need their own function; they are not handled
	 * here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11373 
   11374 	/*
   11375 	 * XXX I21[789] documents say that the SMBus Address register is at
   11376 	 * PHY address 01, Page 0 (not 768), Register 26.
   11377 	 */
   11378 	if (page == HV_INTC_FC_PAGE_START)
   11379 		page = 0;
   11380 
   11381 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11382 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11383 		    page << BME1000_PAGE_SHIFT);
   11384 		if (rv != 0)
   11385 			return rv;
   11386 	}
   11387 
   11388 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11389 }
   11390 
   11391 /*
   11392  * wm_gmii_hv_writereg:	[mii interface function]
   11393  *
 *	Write a PHY register on the HV (PCH family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11397  */
   11398 static int
   11399 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11400 {
   11401 	struct wm_softc *sc = device_private(dev);
   11402 	int rv;
   11403 
   11404 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11405 		device_xname(dev), __func__));
   11406 
   11407 	if (sc->phy.acquire(sc)) {
   11408 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11409 		return -1;
   11410 	}
   11411 
   11412 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11413 	sc->phy.release(sc);
   11414 
   11415 	return rv;
   11416 }
   11417 
   11418 static int
   11419 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11420 {
   11421 	struct wm_softc *sc = device_private(dev);
   11422 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11423 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11424 	int rv;
   11425 
   11426 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11427 
   11428 	/* Page 800 works differently than the rest so it has its own func */
   11429 	if (page == BM_WUC_PAGE)
   11430 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11431 		    false);
   11432 
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and would need their own function; they are not handled
	 * here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: unsupported page %d\n", __func__,
		    page);
		return -1;
	}
   11441 
	/*
	 * XXX I21[789] documents say that the SMBus Address register
	 * is at PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set).
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			device_printf(dev, "XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1,
		    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}
   11473 
   11474 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11475 }
   11476 
   11477 /*
   11478  * wm_gmii_82580_readreg:	[mii interface function]
   11479  *
   11480  *	Read a PHY register on the 82580 and I350.
   11481  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11483  */
   11484 static int
   11485 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11486 {
   11487 	struct wm_softc *sc = device_private(dev);
   11488 	int rv;
   11489 
   11490 	if (sc->phy.acquire(sc) != 0) {
   11491 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11492 		return -1;
   11493 	}
   11494 
   11495 #ifdef DIAGNOSTIC
   11496 	if (reg > MII_ADDRMASK) {
   11497 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11498 		    __func__, sc->sc_phytype, reg);
   11499 		reg &= MII_ADDRMASK;
   11500 	}
   11501 #endif
   11502 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11503 
   11504 	sc->phy.release(sc);
   11505 	return rv;
   11506 }
   11507 
   11508 /*
   11509  * wm_gmii_82580_writereg:	[mii interface function]
   11510  *
   11511  *	Write a PHY register on the 82580 and I350.
   11512  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11514  */
   11515 static int
   11516 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11517 {
   11518 	struct wm_softc *sc = device_private(dev);
   11519 	int rv;
   11520 
   11521 	if (sc->phy.acquire(sc) != 0) {
   11522 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11523 		return -1;
   11524 	}
   11525 
   11526 #ifdef DIAGNOSTIC
   11527 	if (reg > MII_ADDRMASK) {
   11528 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11529 		    __func__, sc->sc_phytype, reg);
   11530 		reg &= MII_ADDRMASK;
   11531 	}
   11532 #endif
   11533 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11534 
   11535 	sc->phy.release(sc);
   11536 	return rv;
   11537 }
   11538 
   11539 /*
   11540  * wm_gmii_gs40g_readreg:	[mii interface function]
   11541  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11545  */
   11546 static int
   11547 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11548 {
   11549 	struct wm_softc *sc = device_private(dev);
   11550 	int page, offset;
   11551 	int rv;
   11552 
   11553 	/* Acquire semaphore */
   11554 	if (sc->phy.acquire(sc)) {
   11555 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11556 		return -1;
   11557 	}
   11558 
   11559 	/* Page select */
   11560 	page = reg >> GS40G_PAGE_SHIFT;
   11561 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11562 	if (rv != 0)
   11563 		goto release;
   11564 
   11565 	/* Read reg */
   11566 	offset = reg & GS40G_OFFSET_MASK;
   11567 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11568 
   11569 release:
   11570 	sc->phy.release(sc);
   11571 	return rv;
   11572 }
   11573 
   11574 /*
   11575  * wm_gmii_gs40g_writereg:	[mii interface function]
   11576  *
   11577  *	Write a PHY register on the I210 and I211.
   11578  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11580  */
   11581 static int
   11582 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11583 {
   11584 	struct wm_softc *sc = device_private(dev);
   11585 	uint16_t page;
   11586 	int offset, rv;
   11587 
   11588 	/* Acquire semaphore */
   11589 	if (sc->phy.acquire(sc)) {
   11590 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11591 		return -1;
   11592 	}
   11593 
   11594 	/* Page select */
   11595 	page = reg >> GS40G_PAGE_SHIFT;
   11596 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11597 	if (rv != 0)
   11598 		goto release;
   11599 
   11600 	/* Write reg */
   11601 	offset = reg & GS40G_OFFSET_MASK;
   11602 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11603 
   11604 release:
   11605 	/* Release semaphore */
   11606 	sc->phy.release(sc);
   11607 	return rv;
   11608 }
   11609 
   11610 /*
   11611  * wm_gmii_statchg:	[mii interface function]
   11612  *
   11613  *	Callback from MII layer when media changes.
   11614  */
   11615 static void
   11616 wm_gmii_statchg(struct ifnet *ifp)
   11617 {
   11618 	struct wm_softc *sc = ifp->if_softc;
   11619 	struct mii_data *mii = &sc->sc_mii;
   11620 
   11621 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11622 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11623 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11624 
   11625 	/* Get flow control negotiation result. */
   11626 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11627 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11628 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11629 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11630 	}
   11631 
   11632 	if (sc->sc_flowflags & IFM_FLOW) {
   11633 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11634 			sc->sc_ctrl |= CTRL_TFCE;
   11635 			sc->sc_fcrtl |= FCRTL_XONE;
   11636 		}
   11637 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11638 			sc->sc_ctrl |= CTRL_RFCE;
   11639 	}
   11640 
   11641 	if (mii->mii_media_active & IFM_FDX) {
   11642 		DPRINTF(WM_DEBUG_LINK,
   11643 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11644 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11645 	} else {
   11646 		DPRINTF(WM_DEBUG_LINK,
   11647 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11648 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11649 	}
   11650 
   11651 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11652 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11653 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11654 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11655 	if (sc->sc_type == WM_T_80003) {
   11656 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11657 		case IFM_1000_T:
   11658 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11659 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11661 			break;
   11662 		default:
   11663 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11664 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11666 			break;
   11667 		}
   11668 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11669 	}
   11670 }
   11671 
   11672 /* kumeran related (80003, ICH* and PCH*) */
   11673 
   11674 /*
   11675  * wm_kmrn_readreg:
   11676  *
   11677  *	Read a kumeran register
   11678  */
   11679 static int
   11680 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11681 {
   11682 	int rv;
   11683 
   11684 	if (sc->sc_type == WM_T_80003)
   11685 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11686 	else
   11687 		rv = sc->phy.acquire(sc);
   11688 	if (rv != 0) {
   11689 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11690 		    __func__);
   11691 		return rv;
   11692 	}
   11693 
   11694 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11695 
   11696 	if (sc->sc_type == WM_T_80003)
   11697 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11698 	else
   11699 		sc->phy.release(sc);
   11700 
   11701 	return rv;
   11702 }
   11703 
   11704 static int
   11705 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11706 {
   11707 
   11708 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11709 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11710 	    KUMCTRLSTA_REN);
   11711 	CSR_WRITE_FLUSH(sc);
   11712 	delay(2);
   11713 
   11714 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11715 
   11716 	return 0;
   11717 }
   11718 
   11719 /*
   11720  * wm_kmrn_writereg:
   11721  *
   11722  *	Write a kumeran register
   11723  */
   11724 static int
   11725 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11726 {
   11727 	int rv;
   11728 
   11729 	if (sc->sc_type == WM_T_80003)
   11730 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11731 	else
   11732 		rv = sc->phy.acquire(sc);
   11733 	if (rv != 0) {
   11734 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11735 		    __func__);
   11736 		return rv;
   11737 	}
   11738 
   11739 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11740 
   11741 	if (sc->sc_type == WM_T_80003)
   11742 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11743 	else
   11744 		sc->phy.release(sc);
   11745 
   11746 	return rv;
   11747 }
   11748 
   11749 static int
   11750 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11751 {
   11752 
   11753 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11754 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11755 
   11756 	return 0;
   11757 }
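
/*
 * The locked variants above are the entire Kumeran protocol: the
 * offset (plus KUMCTRLSTA_REN for a read) is written to KUMCTRLSTA and
 * the low 16 bits are read back from the same register.  A usage
 * sketch, mirroring what wm_gmii_statchg() does on the 80003:
 *
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 */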
   11758 
   11759 /*
   11760  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11761  * This access method is different from IEEE MMD.
   11762  */
   11763 static int
   11764 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11765 {
   11766 	struct wm_softc *sc = device_private(dev);
   11767 	int rv;
   11768 
   11769 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11770 	if (rv != 0)
   11771 		return rv;
   11772 
   11773 	if (rd)
   11774 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11775 	else
   11776 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11777 	return rv;
   11778 }
   11779 
   11780 static int
   11781 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11782 {
   11783 
   11784 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11785 }
   11786 
   11787 static int
   11788 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11789 {
   11790 
   11791 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11792 }
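
/*
 * The EMI helpers above implement a two-step indirect access: the EMI
 * register number goes into I82579_EMI_ADDR and the payload moves
 * through I82579_EMI_DATA, both at PHY address 2.  As noted above,
 * this differs from IEEE MMD access (which goes through the standard
 * MMD control/data registers), which is why these registers need
 * their own helpers.
 */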
   11793 
   11794 /* SGMII related */
   11795 
   11796 /*
   11797  * wm_sgmii_uses_mdio
   11798  *
   11799  * Check whether the transaction is to the internal PHY or the external
   11800  * MDIO interface. Return true if it's MDIO.
   11801  */
   11802 static bool
   11803 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11804 {
   11805 	uint32_t reg;
   11806 	bool ismdio = false;
   11807 
   11808 	switch (sc->sc_type) {
   11809 	case WM_T_82575:
   11810 	case WM_T_82576:
   11811 		reg = CSR_READ(sc, WMREG_MDIC);
   11812 		ismdio = ((reg & MDIC_DEST) != 0);
   11813 		break;
   11814 	case WM_T_82580:
   11815 	case WM_T_I350:
   11816 	case WM_T_I354:
   11817 	case WM_T_I210:
   11818 	case WM_T_I211:
   11819 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11820 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11821 		break;
   11822 	default:
   11823 		break;
   11824 	}
   11825 
   11826 	return ismdio;
   11827 }
   11828 
   11829 /*
   11830  * wm_sgmii_readreg:	[mii interface function]
   11831  *
   11832  *	Read a PHY register on the SGMII
   11833  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11835  */
   11836 static int
   11837 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11838 {
   11839 	struct wm_softc *sc = device_private(dev);
   11840 	int rv;
   11841 
   11842 	if (sc->phy.acquire(sc)) {
   11843 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11844 		return -1;
   11845 	}
   11846 
   11847 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11848 
   11849 	sc->phy.release(sc);
   11850 	return rv;
   11851 }
   11852 
   11853 static int
   11854 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11855 {
   11856 	struct wm_softc *sc = device_private(dev);
   11857 	uint32_t i2ccmd;
   11858 	int i, rv = 0;
   11859 
   11860 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11861 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11862 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11863 
   11864 	/* Poll the ready bit */
   11865 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11866 		delay(50);
   11867 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11868 		if (i2ccmd & I2CCMD_READY)
   11869 			break;
   11870 	}
   11871 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11872 		device_printf(dev, "I2CCMD Read did not complete\n");
   11873 		rv = ETIMEDOUT;
   11874 	}
   11875 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11876 		if (!sc->phy.no_errprint)
   11877 			device_printf(dev, "I2CCMD Error bit set\n");
   11878 		rv = EIO;
   11879 	}
   11880 
   11881 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11882 
   11883 	return rv;
   11884 }
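
/*
 * The byte swap above is needed because I2CCMD carries the two data
 * bytes in the opposite order from the host-order uint16_t the MII
 * layer expects; e.g. a raw I2CCMD data field of 0xaabb is returned
 * to the caller as 0xbbaa.
 */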
   11885 
   11886 /*
   11887  * wm_sgmii_writereg:	[mii interface function]
   11888  *
   11889  *	Write a PHY register on the SGMII.
   11890  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11892  */
   11893 static int
   11894 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11895 {
   11896 	struct wm_softc *sc = device_private(dev);
   11897 	int rv;
   11898 
   11899 	if (sc->phy.acquire(sc) != 0) {
   11900 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11901 		return -1;
   11902 	}
   11903 
   11904 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11905 
   11906 	sc->phy.release(sc);
   11907 
   11908 	return rv;
   11909 }
   11910 
   11911 static int
   11912 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11913 {
   11914 	struct wm_softc *sc = device_private(dev);
   11915 	uint32_t i2ccmd;
   11916 	uint16_t swapdata;
   11917 	int rv = 0;
   11918 	int i;
   11919 
   11920 	/* Swap the data bytes for the I2C interface */
   11921 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11922 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11923 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11924 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11925 
   11926 	/* Poll the ready bit */
   11927 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11928 		delay(50);
   11929 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11930 		if (i2ccmd & I2CCMD_READY)
   11931 			break;
   11932 	}
   11933 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11934 		device_printf(dev, "I2CCMD Write did not complete\n");
   11935 		rv = ETIMEDOUT;
   11936 	}
   11937 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11938 		device_printf(dev, "I2CCMD Error bit set\n");
   11939 		rv = EIO;
   11940 	}
   11941 
   11942 	return rv;
   11943 }
   11944 
   11945 /* TBI related */
   11946 
   11947 static bool
   11948 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11949 {
   11950 	bool sig;
   11951 
   11952 	sig = ctrl & CTRL_SWDPIN(1);
   11953 
   11954 	/*
   11955 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11956 	 * detect a signal, 1 if they don't.
   11957 	 */
   11958 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11959 		sig = !sig;
   11960 
   11961 	return sig;
   11962 }
   11963 
   11964 /*
   11965  * wm_tbi_mediainit:
   11966  *
   11967  *	Initialize media for use on 1000BASE-X devices.
   11968  */
   11969 static void
   11970 wm_tbi_mediainit(struct wm_softc *sc)
   11971 {
   11972 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11973 	const char *sep = "";
   11974 
   11975 	if (sc->sc_type < WM_T_82543)
   11976 		sc->sc_tipg = TIPG_WM_DFLT;
   11977 	else
   11978 		sc->sc_tipg = TIPG_LG_DFLT;
   11979 
   11980 	sc->sc_tbi_serdes_anegticks = 5;
   11981 
   11982 	/* Initialize our media structures */
   11983 	sc->sc_mii.mii_ifp = ifp;
   11984 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11985 
   11986 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11987 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11988 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11989 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   11990 		    sc->sc_core_lock);
   11991 	} else {
   11992 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   11993 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   11994 	}
   11995 
   11996 	/*
   11997 	 * SWD Pins:
   11998 	 *
   11999 	 *	0 = Link LED (output)
   12000 	 *	1 = Loss Of Signal (input)
   12001 	 */
   12002 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12003 
   12004 	/* XXX Perhaps this is only for TBI */
   12005 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12006 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12007 
   12008 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12009 		sc->sc_ctrl &= ~CTRL_LRST;
   12010 
   12011 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12012 
   12013 #define	ADD(ss, mm, dd)							\
   12014 do {									\
   12015 	aprint_normal("%s%s", sep, ss);					\
   12016 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12017 	sep = ", ";							\
   12018 } while (/*CONSTCOND*/0)
   12019 
   12020 	aprint_normal_dev(sc->sc_dev, "");
   12021 
   12022 	if (sc->sc_type == WM_T_I354) {
   12023 		uint32_t status;
   12024 
   12025 		status = CSR_READ(sc, WMREG_STATUS);
   12026 		if (((status & STATUS_2P5_SKU) != 0)
   12027 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   12029 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12031 	} else if (sc->sc_type == WM_T_82545) {
   12032 		/* Only 82545 is LX (XXX except SFP) */
   12033 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12034 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12035 	} else if (sc->sc_sfptype != 0) {
   12036 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12037 		switch (sc->sc_sfptype) {
   12038 		default:
   12039 		case SFF_SFP_ETH_FLAGS_1000SX:
   12040 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12041 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12042 			break;
   12043 		case SFF_SFP_ETH_FLAGS_1000LX:
   12044 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12045 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12046 			break;
   12047 		case SFF_SFP_ETH_FLAGS_1000CX:
   12048 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12049 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12050 			break;
   12051 		case SFF_SFP_ETH_FLAGS_1000T:
   12052 			ADD("1000baseT", IFM_1000_T, 0);
   12053 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12054 			break;
   12055 		case SFF_SFP_ETH_FLAGS_100FX:
   12056 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12057 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12058 			break;
   12059 		}
   12060 	} else {
   12061 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12062 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12063 	}
   12064 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12065 	aprint_normal("\n");
   12066 
   12067 #undef ADD
   12068 
   12069 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12070 }
   12071 
   12072 /*
   12073  * wm_tbi_mediachange:	[ifmedia interface function]
   12074  *
   12075  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12076  */
   12077 static int
   12078 wm_tbi_mediachange(struct ifnet *ifp)
   12079 {
   12080 	struct wm_softc *sc = ifp->if_softc;
   12081 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12082 	uint32_t status, ctrl;
   12083 	bool signal;
   12084 	int i;
   12085 
   12086 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12087 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12088 		/* XXX need some work for >= 82571 and < 82575 */
   12089 		if (sc->sc_type < WM_T_82575)
   12090 			return 0;
   12091 	}
   12092 
   12093 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12094 	    || (sc->sc_type >= WM_T_82575))
   12095 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12096 
   12097 	sc->sc_ctrl &= ~CTRL_LRST;
   12098 	sc->sc_txcw = TXCW_ANE;
   12099 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12100 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12101 	else if (ife->ifm_media & IFM_FDX)
   12102 		sc->sc_txcw |= TXCW_FD;
   12103 	else
   12104 		sc->sc_txcw |= TXCW_HD;
   12105 
   12106 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12107 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12108 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12110 		device_xname(sc->sc_dev), sc->sc_txcw));
   12111 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12112 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12113 	CSR_WRITE_FLUSH(sc);
   12114 	delay(1000);
   12115 
   12116 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12117 	signal = wm_tbi_havesignal(sc, ctrl);
   12118 
   12119 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12120 		signal));
   12121 
   12122 	if (signal) {
   12123 		/* Have signal; wait for the link to come up. */
   12124 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12125 			delay(10000);
   12126 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12127 				break;
   12128 		}
   12129 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   12131 			device_xname(sc->sc_dev), i));
   12132 
   12133 		status = CSR_READ(sc, WMREG_STATUS);
   12134 		DPRINTF(WM_DEBUG_LINK,
   12135 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12136 			device_xname(sc->sc_dev), status, STATUS_LU));
   12137 		if (status & STATUS_LU) {
   12138 			/* Link is up. */
   12139 			DPRINTF(WM_DEBUG_LINK,
   12140 			    ("%s: LINK: set media -> link up %s\n",
   12141 				device_xname(sc->sc_dev),
   12142 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12143 
			/*
			 * NOTE: The hardware updates TFCE and RFCE in CTRL
			 * automatically, so we must keep sc->sc_ctrl in
			 * sync with the value just read.
			 */
   12148 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12149 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12150 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12151 			if (status & STATUS_FD)
   12152 				sc->sc_tctl |=
   12153 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12154 			else
   12155 				sc->sc_tctl |=
   12156 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12157 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12158 				sc->sc_fcrtl |= FCRTL_XONE;
   12159 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12160 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12161 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12162 			sc->sc_tbi_linkup = 1;
   12163 		} else {
   12164 			if (i == WM_LINKUP_TIMEOUT)
   12165 				wm_check_for_link(sc);
   12166 			/* Link is down. */
   12167 			DPRINTF(WM_DEBUG_LINK,
   12168 			    ("%s: LINK: set media -> link down\n",
   12169 				device_xname(sc->sc_dev)));
   12170 			sc->sc_tbi_linkup = 0;
   12171 		}
   12172 	} else {
   12173 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12174 			device_xname(sc->sc_dev)));
   12175 		sc->sc_tbi_linkup = 0;
   12176 	}
   12177 
   12178 	wm_tbi_serdes_set_linkled(sc);
   12179 
   12180 	return 0;
   12181 }
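
/*
 * For reference, the TXCW value built above is the IEEE 802.3z
 * auto-negotiation code word the MAC transmits in /C/ ordered sets:
 * TXCW_ANE enables autonegotiation, TXCW_FD/TXCW_HD advertise duplex,
 * and the pause bits advertise flow control, much like the ANAR_X_*
 * bits do for copper PHYs.
 */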
   12182 
   12183 /*
   12184  * wm_tbi_mediastatus:	[ifmedia interface function]
   12185  *
   12186  *	Get the current interface media status on a 1000BASE-X device.
   12187  */
   12188 static void
   12189 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12190 {
   12191 	struct wm_softc *sc = ifp->if_softc;
   12192 	uint32_t ctrl, status;
   12193 
   12194 	ifmr->ifm_status = IFM_AVALID;
   12195 	ifmr->ifm_active = IFM_ETHER;
   12196 
   12197 	status = CSR_READ(sc, WMREG_STATUS);
   12198 	if ((status & STATUS_LU) == 0) {
   12199 		ifmr->ifm_active |= IFM_NONE;
   12200 		return;
   12201 	}
   12202 
   12203 	ifmr->ifm_status |= IFM_ACTIVE;
   12204 	/* Only 82545 is LX */
   12205 	if (sc->sc_type == WM_T_82545)
   12206 		ifmr->ifm_active |= IFM_1000_LX;
   12207 	else
   12208 		ifmr->ifm_active |= IFM_1000_SX;
   12209 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12210 		ifmr->ifm_active |= IFM_FDX;
   12211 	else
   12212 		ifmr->ifm_active |= IFM_HDX;
   12213 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12214 	if (ctrl & CTRL_RFCE)
   12215 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12216 	if (ctrl & CTRL_TFCE)
   12217 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12218 }
   12219 
   12220 /* XXX TBI only */
   12221 static int
   12222 wm_check_for_link(struct wm_softc *sc)
   12223 {
   12224 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12225 	uint32_t rxcw;
   12226 	uint32_t ctrl;
   12227 	uint32_t status;
   12228 	bool signal;
   12229 
   12230 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12231 		device_xname(sc->sc_dev), __func__));
   12232 
   12233 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12234 		/* XXX need some work for >= 82571 */
   12235 		if (sc->sc_type >= WM_T_82571) {
   12236 			sc->sc_tbi_linkup = 1;
   12237 			return 0;
   12238 		}
   12239 	}
   12240 
   12241 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12242 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12243 	status = CSR_READ(sc, WMREG_STATUS);
   12244 	signal = wm_tbi_havesignal(sc, ctrl);
   12245 
   12246 	DPRINTF(WM_DEBUG_LINK,
   12247 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12248 		device_xname(sc->sc_dev), __func__, signal,
   12249 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12250 
   12251 	/*
   12252 	 * SWDPIN   LU RXCW
   12253 	 *	0    0	  0
   12254 	 *	0    0	  1	(should not happen)
   12255 	 *	0    1	  0	(should not happen)
   12256 	 *	0    1	  1	(should not happen)
   12257 	 *	1    0	  0	Disable autonego and force linkup
   12258 	 *	1    0	  1	got /C/ but not linkup yet
   12259 	 *	1    1	  0	(linkup)
   12260 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12261 	 *
   12262 	 */
   12263 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12264 		DPRINTF(WM_DEBUG_LINK,
   12265 		    ("%s: %s: force linkup and fullduplex\n",
   12266 			device_xname(sc->sc_dev), __func__));
   12267 		sc->sc_tbi_linkup = 0;
   12268 		/* Disable auto-negotiation in the TXCW register */
   12269 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12270 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: The hardware updated TFCE and RFCE in CTRL
		 * automatically, so update sc->sc_ctrl from the value
		 * just read.
		 */
   12277 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12278 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12279 	} else if (((status & STATUS_LU) != 0)
   12280 	    && ((rxcw & RXCW_C) != 0)
   12281 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12282 		sc->sc_tbi_linkup = 1;
   12283 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12284 			device_xname(sc->sc_dev),
   12285 			__func__));
   12286 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12287 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12288 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12289 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12290 			device_xname(sc->sc_dev), __func__));
   12291 	} else {
   12292 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12293 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12294 			status));
   12295 	}
   12296 
   12297 	return 0;
   12298 }
   12299 
   12300 /*
   12301  * wm_tbi_tick:
   12302  *
   12303  *	Check the link on TBI devices.
   12304  *	This function acts as mii_tick().
   12305  */
   12306 static void
   12307 wm_tbi_tick(struct wm_softc *sc)
   12308 {
   12309 	struct mii_data *mii = &sc->sc_mii;
   12310 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12311 	uint32_t status;
   12312 
   12313 	KASSERT(WM_CORE_LOCKED(sc));
   12314 
   12315 	status = CSR_READ(sc, WMREG_STATUS);
   12316 
   12317 	/* XXX is this needed? */
   12318 	(void)CSR_READ(sc, WMREG_RXCW);
   12319 	(void)CSR_READ(sc, WMREG_CTRL);
   12320 
   12321 	/* set link status */
   12322 	if ((status & STATUS_LU) == 0) {
   12323 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12324 			device_xname(sc->sc_dev)));
   12325 		sc->sc_tbi_linkup = 0;
   12326 	} else if (sc->sc_tbi_linkup == 0) {
   12327 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12328 			device_xname(sc->sc_dev),
   12329 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12330 		sc->sc_tbi_linkup = 1;
   12331 		sc->sc_tbi_serdes_ticks = 0;
   12332 	}
   12333 
   12334 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12335 		goto setled;
   12336 
   12337 	if ((status & STATUS_LU) == 0) {
   12338 		sc->sc_tbi_linkup = 0;
   12339 		/* If the timer expired, retry autonegotiation */
   12340 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12341 		    && (++sc->sc_tbi_serdes_ticks
   12342 			>= sc->sc_tbi_serdes_anegticks)) {
   12343 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12344 				device_xname(sc->sc_dev), __func__));
   12345 			sc->sc_tbi_serdes_ticks = 0;
   12346 			/*
   12347 			 * Reset the link, and let autonegotiation do
   12348 			 * its thing
   12349 			 */
   12350 			sc->sc_ctrl |= CTRL_LRST;
   12351 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12352 			CSR_WRITE_FLUSH(sc);
   12353 			delay(1000);
   12354 			sc->sc_ctrl &= ~CTRL_LRST;
   12355 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12356 			CSR_WRITE_FLUSH(sc);
   12357 			delay(1000);
   12358 			CSR_WRITE(sc, WMREG_TXCW,
   12359 			    sc->sc_txcw & ~TXCW_ANE);
   12360 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12361 		}
   12362 	}
   12363 
   12364 setled:
   12365 	wm_tbi_serdes_set_linkled(sc);
   12366 }
   12367 
   12368 /* SERDES related */
   12369 static void
   12370 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12371 {
   12372 	uint32_t reg;
   12373 
   12374 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12375 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12376 		return;
   12377 
   12378 	/* Enable PCS to turn on link */
   12379 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12380 	reg |= PCS_CFG_PCS_EN;
   12381 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12382 
   12383 	/* Power up the laser */
   12384 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12385 	reg &= ~CTRL_EXT_SWDPIN(3);
   12386 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12387 
   12388 	/* Flush the write to verify completion */
   12389 	CSR_WRITE_FLUSH(sc);
   12390 	delay(1000);
   12391 }
   12392 
   12393 static int
   12394 wm_serdes_mediachange(struct ifnet *ifp)
   12395 {
   12396 	struct wm_softc *sc = ifp->if_softc;
   12397 	bool pcs_autoneg = true; /* XXX */
   12398 	uint32_t ctrl_ext, pcs_lctl, reg;
   12399 
   12400 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12401 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12402 		return 0;
   12403 
   12404 	/* XXX Currently, this function is not called on 8257[12] */
   12405 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12406 	    || (sc->sc_type >= WM_T_82575))
   12407 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12408 
   12409 	/* Power on the sfp cage if present */
   12410 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12411 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12412 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12413 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12414 
   12415 	sc->sc_ctrl |= CTRL_SLU;
   12416 
   12417 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12418 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12419 
   12420 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12421 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12422 	case CTRL_EXT_LINK_MODE_SGMII:
   12423 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12424 		pcs_autoneg = true;
		/* Autoneg timeout should be disabled for SGMII mode */
   12426 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12427 		break;
   12428 	case CTRL_EXT_LINK_MODE_1000KX:
   12429 		pcs_autoneg = false;
   12430 		/* FALLTHROUGH */
   12431 	default:
   12432 		if ((sc->sc_type == WM_T_82575)
   12433 		    || (sc->sc_type == WM_T_82576)) {
   12434 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12435 				pcs_autoneg = false;
   12436 		}
   12437 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12438 		    | CTRL_FRCFDX;
   12439 
   12440 		/* Set speed of 1000/Full if speed/duplex is forced */
   12441 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12442 	}
   12443 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12444 
   12445 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12446 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12447 
   12448 	if (pcs_autoneg) {
   12449 		/* Set PCS register for autoneg */
   12450 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12451 
   12452 		/* Disable force flow control for autoneg */
   12453 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12454 
   12455 		/* Configure flow control advertisement for autoneg */
   12456 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12457 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12458 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12459 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12460 	} else
   12461 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12462 
   12463 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12464 
   12465 	return 0;
   12466 }
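
/*
 * Summary of the PCS_LCTL setup above: with pcs_autoneg the PCS runs
 * 802.3z autonegotiation (PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART,
 * with forced flow control cleared) and advertises symmetric and
 * asymmetric pause via PCS_ANADV; otherwise speed, duplex and flow
 * control are forced (PCS_LCTL_FSD | PCS_LCTL_FORCE_FC) from the CTRL
 * values set earlier.
 */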
   12467 
   12468 static void
   12469 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12470 {
   12471 	struct wm_softc *sc = ifp->if_softc;
   12472 	struct mii_data *mii = &sc->sc_mii;
   12473 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12474 	uint32_t pcs_adv, pcs_lpab, reg;
   12475 
   12476 	ifmr->ifm_status = IFM_AVALID;
   12477 	ifmr->ifm_active = IFM_ETHER;
   12478 
   12479 	/* Check PCS */
   12480 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12481 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12482 		ifmr->ifm_active |= IFM_NONE;
   12483 		sc->sc_tbi_linkup = 0;
   12484 		goto setled;
   12485 	}
   12486 
   12487 	sc->sc_tbi_linkup = 1;
   12488 	ifmr->ifm_status |= IFM_ACTIVE;
   12489 	if (sc->sc_type == WM_T_I354) {
   12490 		uint32_t status;
   12491 
   12492 		status = CSR_READ(sc, WMREG_STATUS);
   12493 		if (((status & STATUS_2P5_SKU) != 0)
   12494 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12495 			ifmr->ifm_active |= IFM_2500_KX;
   12496 		} else
   12497 			ifmr->ifm_active |= IFM_1000_KX;
   12498 	} else {
   12499 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12500 		case PCS_LSTS_SPEED_10:
   12501 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12502 			break;
   12503 		case PCS_LSTS_SPEED_100:
   12504 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12505 			break;
   12506 		case PCS_LSTS_SPEED_1000:
   12507 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12508 			break;
   12509 		default:
   12510 			device_printf(sc->sc_dev, "Unknown speed\n");
   12511 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12512 			break;
   12513 		}
   12514 	}
   12515 	if ((reg & PCS_LSTS_FDX) != 0)
   12516 		ifmr->ifm_active |= IFM_FDX;
   12517 	else
   12518 		ifmr->ifm_active |= IFM_HDX;
   12519 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12520 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12521 		/* Check flow */
   12522 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12523 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12524 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12525 			goto setled;
   12526 		}
   12527 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12528 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12529 		DPRINTF(WM_DEBUG_LINK,
   12530 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12531 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12532 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12533 			mii->mii_media_active |= IFM_FLOW
   12534 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12535 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12536 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12537 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12538 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12539 			mii->mii_media_active |= IFM_FLOW
   12540 			    | IFM_ETH_TXPAUSE;
   12541 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12542 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12543 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12544 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12545 			mii->mii_media_active |= IFM_FLOW
   12546 			    | IFM_ETH_RXPAUSE;
   12547 		}
   12548 	}
   12549 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12550 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12551 setled:
   12552 	wm_tbi_serdes_set_linkled(sc);
   12553 }
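
/*
 * The flow control resolution above follows the usual IEEE 802.3
 * Annex 28B pause rules (local advertisement vs. link partner
 * ability):
 *
 *	local SYM  local ASYM  partner SYM  partner ASYM  result
 *	    1          x            1            x        TX and RX pause
 *	    0          1            1            1        TX pause only
 *	    1          1            0            1        RX pause only
 */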
   12554 
   12555 /*
   12556  * wm_serdes_tick:
   12557  *
   12558  *	Check the link on serdes devices.
   12559  */
   12560 static void
   12561 wm_serdes_tick(struct wm_softc *sc)
   12562 {
   12563 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12564 	struct mii_data *mii = &sc->sc_mii;
   12565 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12566 	uint32_t reg;
   12567 
   12568 	KASSERT(WM_CORE_LOCKED(sc));
   12569 
   12570 	mii->mii_media_status = IFM_AVALID;
   12571 	mii->mii_media_active = IFM_ETHER;
   12572 
   12573 	/* Check PCS */
   12574 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12575 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12576 		mii->mii_media_status |= IFM_ACTIVE;
   12577 		sc->sc_tbi_linkup = 1;
   12578 		sc->sc_tbi_serdes_ticks = 0;
   12579 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12580 		if ((reg & PCS_LSTS_FDX) != 0)
   12581 			mii->mii_media_active |= IFM_FDX;
   12582 		else
   12583 			mii->mii_media_active |= IFM_HDX;
   12584 	} else {
		mii->mii_media_active |= IFM_NONE;
   12586 		sc->sc_tbi_linkup = 0;
   12587 		/* If the timer expired, retry autonegotiation */
   12588 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12589 		    && (++sc->sc_tbi_serdes_ticks
   12590 			>= sc->sc_tbi_serdes_anegticks)) {
   12591 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12592 				device_xname(sc->sc_dev), __func__));
   12593 			sc->sc_tbi_serdes_ticks = 0;
   12594 			/* XXX */
   12595 			wm_serdes_mediachange(ifp);
   12596 		}
   12597 	}
   12598 
   12599 	wm_tbi_serdes_set_linkled(sc);
   12600 }
   12601 
   12602 /* SFP related */
   12603 
   12604 static int
   12605 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12606 {
   12607 	uint32_t i2ccmd;
   12608 	int i;
   12609 
   12610 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12611 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12612 
   12613 	/* Poll the ready bit */
   12614 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12615 		delay(50);
   12616 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12617 		if (i2ccmd & I2CCMD_READY)
   12618 			break;
   12619 	}
   12620 	if ((i2ccmd & I2CCMD_READY) == 0)
   12621 		return -1;
   12622 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12623 		return -1;
   12624 
   12625 	*data = i2ccmd & 0x00ff;
   12626 
   12627 	return 0;
   12628 }
   12629 
   12630 static uint32_t
   12631 wm_sfp_get_media_type(struct wm_softc *sc)
   12632 {
   12633 	uint32_t ctrl_ext;
   12634 	uint8_t val = 0;
   12635 	int timeout = 3;
   12636 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12637 	int rv = -1;
   12638 
   12639 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12640 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12641 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12642 	CSR_WRITE_FLUSH(sc);
   12643 
   12644 	/* Read SFP module data */
   12645 	while (timeout) {
   12646 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12647 		if (rv == 0)
   12648 			break;
   12649 		delay(100*1000); /* XXX too big */
   12650 		timeout--;
   12651 	}
   12652 	if (rv != 0)
   12653 		goto out;
   12654 
   12655 	switch (val) {
   12656 	case SFF_SFP_ID_SFF:
   12657 		aprint_normal_dev(sc->sc_dev,
   12658 		    "Module/Connector soldered to board\n");
   12659 		break;
   12660 	case SFF_SFP_ID_SFP:
   12661 		sc->sc_flags |= WM_F_SFP;
   12662 		break;
   12663 	case SFF_SFP_ID_UNKNOWN:
   12664 		goto out;
   12665 	default:
   12666 		break;
   12667 	}
   12668 
   12669 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12670 	if (rv != 0)
   12671 		goto out;
   12672 
   12673 	sc->sc_sfptype = val;
   12674 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12675 		mediatype = WM_MEDIATYPE_SERDES;
   12676 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12677 		sc->sc_flags |= WM_F_SGMII;
   12678 		mediatype = WM_MEDIATYPE_COPPER;
   12679 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12680 		sc->sc_flags |= WM_F_SGMII;
   12681 		mediatype = WM_MEDIATYPE_SERDES;
   12682 	} else {
   12683 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12684 		    __func__, sc->sc_sfptype);
   12685 		sc->sc_sfptype = 0; /* XXX unknown */
   12686 	}
   12687 
   12688 out:
   12689 	/* Restore I2C interface setting */
   12690 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12691 
   12692 	return mediatype;
   12693 }
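
/*
 * SFF_SFP_ETH_FLAGS_OFF above is, judging by the flags decoded here,
 * the Ethernet compliance-code byte of the module's SFF ID block, so
 * the mapping ends up as: 1000BASE-SX and -LX modules are treated as
 * SERDES, 1000BASE-T as SGMII copper, and 100BASE-FX as SERDES
 * reached over SGMII.
 */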
   12694 
   12695 /*
   12696  * NVM related.
   12697  * Microwire, SPI (w/wo EERD) and Flash.
   12698  */
   12699 
   12700 /* Both spi and uwire */
   12701 
   12702 /*
   12703  * wm_eeprom_sendbits:
   12704  *
   12705  *	Send a series of bits to the EEPROM.
   12706  */
   12707 static void
   12708 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12709 {
   12710 	uint32_t reg;
   12711 	int x;
   12712 
   12713 	reg = CSR_READ(sc, WMREG_EECD);
   12714 
   12715 	for (x = nbits; x > 0; x--) {
   12716 		if (bits & (1U << (x - 1)))
   12717 			reg |= EECD_DI;
   12718 		else
   12719 			reg &= ~EECD_DI;
   12720 		CSR_WRITE(sc, WMREG_EECD, reg);
   12721 		CSR_WRITE_FLUSH(sc);
   12722 		delay(2);
   12723 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12724 		CSR_WRITE_FLUSH(sc);
   12725 		delay(2);
   12726 		CSR_WRITE(sc, WMREG_EECD, reg);
   12727 		CSR_WRITE_FLUSH(sc);
   12728 		delay(2);
   12729 	}
   12730 }
   12731 
   12732 /*
   12733  * wm_eeprom_recvbits:
   12734  *
   12735  *	Receive a series of bits from the EEPROM.
   12736  */
   12737 static void
   12738 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12739 {
   12740 	uint32_t reg, val;
   12741 	int x;
   12742 
   12743 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12744 
   12745 	val = 0;
   12746 	for (x = nbits; x > 0; x--) {
   12747 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12748 		CSR_WRITE_FLUSH(sc);
   12749 		delay(2);
   12750 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12751 			val |= (1U << (x - 1));
   12752 		CSR_WRITE(sc, WMREG_EECD, reg);
   12753 		CSR_WRITE_FLUSH(sc);
   12754 		delay(2);
   12755 	}
   12756 	*valp = val;
   12757 }
   12758 
   12759 /* Microwire */
   12760 
   12761 /*
   12762  * wm_nvm_read_uwire:
   12763  *
   12764  *	Read a word from the EEPROM using the MicroWire protocol.
   12765  */
   12766 static int
   12767 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12768 {
   12769 	uint32_t reg, val;
   12770 	int i;
   12771 
   12772 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12773 		device_xname(sc->sc_dev), __func__));
   12774 
   12775 	if (sc->nvm.acquire(sc) != 0)
   12776 		return -1;
   12777 
   12778 	for (i = 0; i < wordcnt; i++) {
   12779 		/* Clear SK and DI. */
   12780 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12781 		CSR_WRITE(sc, WMREG_EECD, reg);
   12782 
   12783 		/*
   12784 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12785 		 * and Xen.
   12786 		 *
   12787 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   12789 		 */
   12790 		if (sc->sc_type == WM_T_82540) {
   12791 			reg |= EECD_SK;
   12792 			CSR_WRITE(sc, WMREG_EECD, reg);
   12793 			reg &= ~EECD_SK;
   12794 			CSR_WRITE(sc, WMREG_EECD, reg);
   12795 			CSR_WRITE_FLUSH(sc);
   12796 			delay(2);
   12797 		}
   12798 		/* XXX: end of workaround */
   12799 
   12800 		/* Set CHIP SELECT. */
   12801 		reg |= EECD_CS;
   12802 		CSR_WRITE(sc, WMREG_EECD, reg);
   12803 		CSR_WRITE_FLUSH(sc);
   12804 		delay(2);
   12805 
   12806 		/* Shift in the READ command. */
   12807 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12808 
   12809 		/* Shift in address. */
   12810 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12811 
   12812 		/* Shift out the data. */
   12813 		wm_eeprom_recvbits(sc, &val, 16);
   12814 		data[i] = val & 0xffff;
   12815 
   12816 		/* Clear CHIP SELECT. */
   12817 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12818 		CSR_WRITE(sc, WMREG_EECD, reg);
   12819 		CSR_WRITE_FLUSH(sc);
   12820 		delay(2);
   12821 	}
   12822 
   12823 	sc->nvm.release(sc);
   12824 	return 0;
   12825 }
   12826 
   12827 /* SPI */
   12828 
   12829 /*
   12830  * Set SPI and FLASH related information from the EECD register.
   12831  * For 82541 and 82547, the word size is taken from EEPROM.
   12832  */
   12833 static int
   12834 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12835 {
   12836 	int size;
   12837 	uint32_t reg;
   12838 	uint16_t data;
   12839 
   12840 	reg = CSR_READ(sc, WMREG_EECD);
   12841 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12842 
   12843 	/* Read the size of NVM from EECD by default */
   12844 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12845 	switch (sc->sc_type) {
   12846 	case WM_T_82541:
   12847 	case WM_T_82541_2:
   12848 	case WM_T_82547:
   12849 	case WM_T_82547_2:
   12850 		/* Set dummy value to access EEPROM */
   12851 		sc->sc_nvm_wordsize = 64;
   12852 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12853 			aprint_error_dev(sc->sc_dev,
   12854 			    "%s: failed to read EEPROM size\n", __func__);
   12855 		}
   12856 		reg = data;
   12857 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12858 		if (size == 0)
   12859 			size = 6; /* 64 word size */
   12860 		else
   12861 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12862 		break;
   12863 	case WM_T_80003:
   12864 	case WM_T_82571:
   12865 	case WM_T_82572:
   12866 	case WM_T_82573: /* SPI case */
   12867 	case WM_T_82574: /* SPI case */
   12868 	case WM_T_82583: /* SPI case */
   12869 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12870 		if (size > 14)
   12871 			size = 14;
   12872 		break;
   12873 	case WM_T_82575:
   12874 	case WM_T_82576:
   12875 	case WM_T_82580:
   12876 	case WM_T_I350:
   12877 	case WM_T_I354:
   12878 	case WM_T_I210:
   12879 	case WM_T_I211:
   12880 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12881 		if (size > 15)
   12882 			size = 15;
   12883 		break;
   12884 	default:
   12885 		aprint_error_dev(sc->sc_dev,
   12886 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12887 		return -1;
   12888 		break;
   12889 	}
   12890 
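          	/* size is the log2 of the word count, e.g. size 6 -> 64 words. */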
   12891 	sc->sc_nvm_wordsize = 1 << size;
   12892 
   12893 	return 0;
   12894 }
   12895 
   12896 /*
   12897  * wm_nvm_ready_spi:
   12898  *
   12899  *	Wait for a SPI EEPROM to be ready for commands.
   12900  */
   12901 static int
   12902 wm_nvm_ready_spi(struct wm_softc *sc)
   12903 {
   12904 	uint32_t val;
   12905 	int usec;
   12906 
   12907 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12908 		device_xname(sc->sc_dev), __func__));
   12909 
   12910 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12911 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12912 		wm_eeprom_recvbits(sc, &val, 8);
   12913 		if ((val & SPI_SR_RDY) == 0)
   12914 			break;
   12915 	}
   12916 	if (usec >= SPI_MAX_RETRIES) {
    12917 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12918 		return -1;
   12919 	}
   12920 	return 0;
   12921 }
   12922 
   12923 /*
   12924  * wm_nvm_read_spi:
   12925  *
    12926  *	Read a word from the EEPROM using the SPI protocol.
   12927  */
   12928 static int
   12929 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12930 {
   12931 	uint32_t reg, val;
   12932 	int i;
   12933 	uint8_t opc;
   12934 	int rv = 0;
   12935 
   12936 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12937 		device_xname(sc->sc_dev), __func__));
   12938 
   12939 	if (sc->nvm.acquire(sc) != 0)
   12940 		return -1;
   12941 
   12942 	/* Clear SK and CS. */
   12943 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12944 	CSR_WRITE(sc, WMREG_EECD, reg);
   12945 	CSR_WRITE_FLUSH(sc);
   12946 	delay(2);
   12947 
   12948 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12949 		goto out;
   12950 
   12951 	/* Toggle CS to flush commands. */
   12952 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12953 	CSR_WRITE_FLUSH(sc);
   12954 	delay(2);
   12955 	CSR_WRITE(sc, WMREG_EECD, reg);
   12956 	CSR_WRITE_FLUSH(sc);
   12957 	delay(2);
   12958 
   12959 	opc = SPI_OPC_READ;
   12960 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12961 		opc |= SPI_OPC_A8;
   12962 
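          	/*
          	 * On parts with 8 address bits, words at or above 128 need a
          	 * ninth address bit, which travels in the opcode (A8).  The
          	 * device is byte addressed, hence the word address << 1 below.
          	 */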
   12963 	wm_eeprom_sendbits(sc, opc, 8);
   12964 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12965 
   12966 	for (i = 0; i < wordcnt; i++) {
   12967 		wm_eeprom_recvbits(sc, &val, 16);
   12968 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12969 	}
   12970 
   12971 	/* Raise CS and clear SK. */
   12972 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12973 	CSR_WRITE(sc, WMREG_EECD, reg);
   12974 	CSR_WRITE_FLUSH(sc);
   12975 	delay(2);
   12976 
   12977 out:
   12978 	sc->nvm.release(sc);
   12979 	return rv;
   12980 }
   12981 
    12982 /* Reading via the EERD register */
   12983 
   12984 static int
   12985 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12986 {
   12987 	uint32_t attempts = 100000;
   12988 	uint32_t i, reg = 0;
   12989 	int32_t done = -1;
   12990 
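          	/* Poll the DONE bit for up to ~500ms (100000 tries, 5us apart). */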
   12991 	for (i = 0; i < attempts; i++) {
   12992 		reg = CSR_READ(sc, rw);
   12993 
   12994 		if (reg & EERD_DONE) {
   12995 			done = 0;
   12996 			break;
   12997 		}
   12998 		delay(5);
   12999 	}
   13000 
   13001 	return done;
   13002 }
   13003 
   13004 static int
   13005 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13006 {
   13007 	int i, eerd = 0;
   13008 	int rv = 0;
   13009 
   13010 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13011 		device_xname(sc->sc_dev), __func__));
   13012 
   13013 	if (sc->nvm.acquire(sc) != 0)
   13014 		return -1;
   13015 
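          	/*
          	 * Per word: write the address and the START bit to EERD, poll
          	 * until DONE, then read the word back from the data field.
          	 */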
   13016 	for (i = 0; i < wordcnt; i++) {
   13017 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13018 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13019 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13020 		if (rv != 0) {
   13021 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13022 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13023 			break;
   13024 		}
   13025 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13026 	}
   13027 
   13028 	sc->nvm.release(sc);
   13029 	return rv;
   13030 }
   13031 
   13032 /* Flash */
   13033 
   13034 static int
   13035 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13036 {
   13037 	uint32_t eecd;
   13038 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13039 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13040 	uint32_t nvm_dword = 0;
   13041 	uint8_t sig_byte = 0;
   13042 	int rv;
   13043 
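          	/*
          	 * A bank is usable when the signature bits read back from it
          	 * match ICH_NVM_SIG_VALUE; check bank 0 first, then bank 1.
          	 */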
   13044 	switch (sc->sc_type) {
   13045 	case WM_T_PCH_SPT:
   13046 	case WM_T_PCH_CNP:
   13047 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13048 		act_offset = ICH_NVM_SIG_WORD * 2;
   13049 
   13050 		/* Set bank to 0 in case flash read fails. */
   13051 		*bank = 0;
   13052 
   13053 		/* Check bank 0 */
   13054 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13055 		if (rv != 0)
   13056 			return rv;
   13057 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13058 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13059 			*bank = 0;
   13060 			return 0;
   13061 		}
   13062 
   13063 		/* Check bank 1 */
    13064 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13065 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    13066 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13067 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13068 			*bank = 1;
   13069 			return 0;
   13070 		}
   13071 		aprint_error_dev(sc->sc_dev,
   13072 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13073 		return -1;
   13074 	case WM_T_ICH8:
   13075 	case WM_T_ICH9:
   13076 		eecd = CSR_READ(sc, WMREG_EECD);
   13077 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13078 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13079 			return 0;
   13080 		}
   13081 		/* FALLTHROUGH */
   13082 	default:
   13083 		/* Default to 0 */
   13084 		*bank = 0;
   13085 
   13086 		/* Check bank 0 */
   13087 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13088 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13089 			*bank = 0;
   13090 			return 0;
   13091 		}
   13092 
   13093 		/* Check bank 1 */
   13094 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13095 		    &sig_byte);
   13096 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13097 			*bank = 1;
   13098 			return 0;
   13099 		}
   13100 	}
   13101 
   13102 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13103 		device_xname(sc->sc_dev)));
   13104 	return -1;
   13105 }
   13106 
   13107 /******************************************************************************
   13108  * This function does initial flash setup so that a new read/write/erase cycle
   13109  * can be started.
   13110  *
   13111  * sc - The pointer to the hw structure
   13112  ****************************************************************************/
   13113 static int32_t
   13114 wm_ich8_cycle_init(struct wm_softc *sc)
   13115 {
   13116 	uint16_t hsfsts;
   13117 	int32_t error = 1;
   13118 	int32_t i     = 0;
   13119 
   13120 	if (sc->sc_type >= WM_T_PCH_SPT)
   13121 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13122 	else
   13123 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13124 
    13125 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   13126 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13127 		return error;
   13128 
   13129 	/* Clear FCERR in Hw status by writing 1 */
   13130 	/* Clear DAEL in Hw status by writing a 1 */
   13131 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13132 
   13133 	if (sc->sc_type >= WM_T_PCH_SPT)
   13134 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13135 	else
   13136 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13137 
    13138 	/*
    13139 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13140 	 * check against in order to start a new cycle, or the FDONE bit
    13141 	 * should be changed in the hardware so that it is 1 after a
    13142 	 * hardware reset, which could then tell whether a cycle is in
    13143 	 * progress or has completed.  We should also have a software
    13144 	 * semaphore guarding FDONE or the cycle-in-progress bit so that
    13145 	 * accesses by two threads are serialized, or some way to keep
    13146 	 * two threads from starting a cycle at the same time.
    13147 	 */
   13148 
   13149 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13150 		/*
   13151 		 * There is no cycle running at present, so we can start a
   13152 		 * cycle
   13153 		 */
   13154 
   13155 		/* Begin by setting Flash Cycle Done. */
   13156 		hsfsts |= HSFSTS_DONE;
   13157 		if (sc->sc_type >= WM_T_PCH_SPT)
   13158 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13159 			    hsfsts & 0xffffUL);
   13160 		else
   13161 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13162 		error = 0;
   13163 	} else {
   13164 		/*
    13165 		 * Otherwise poll for some time so the current cycle has a
   13166 		 * chance to end before giving up.
   13167 		 */
   13168 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13169 			if (sc->sc_type >= WM_T_PCH_SPT)
   13170 				hsfsts = ICH8_FLASH_READ32(sc,
   13171 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13172 			else
   13173 				hsfsts = ICH8_FLASH_READ16(sc,
   13174 				    ICH_FLASH_HSFSTS);
   13175 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13176 				error = 0;
   13177 				break;
   13178 			}
   13179 			delay(1);
   13180 		}
   13181 		if (error == 0) {
   13182 			/*
    13183 			 * The previous cycle ended within the timeout; now
    13184 			 * set the Flash Cycle Done bit.
   13185 			 */
   13186 			hsfsts |= HSFSTS_DONE;
   13187 			if (sc->sc_type >= WM_T_PCH_SPT)
   13188 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13189 				    hsfsts & 0xffffUL);
   13190 			else
   13191 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13192 				    hsfsts);
   13193 		}
   13194 	}
   13195 	return error;
   13196 }
   13197 
   13198 /******************************************************************************
   13199  * This function starts a flash cycle and waits for its completion
   13200  *
   13201  * sc - The pointer to the hw structure
   13202  ****************************************************************************/
   13203 static int32_t
   13204 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13205 {
   13206 	uint16_t hsflctl;
   13207 	uint16_t hsfsts;
   13208 	int32_t error = 1;
   13209 	uint32_t i = 0;
   13210 
   13211 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13212 	if (sc->sc_type >= WM_T_PCH_SPT)
   13213 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13214 	else
   13215 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13216 	hsflctl |= HSFCTL_GO;
   13217 	if (sc->sc_type >= WM_T_PCH_SPT)
   13218 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13219 		    (uint32_t)hsflctl << 16);
   13220 	else
   13221 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13222 
   13223 	/* Wait till FDONE bit is set to 1 */
   13224 	do {
   13225 		if (sc->sc_type >= WM_T_PCH_SPT)
   13226 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13227 			    & 0xffffUL;
   13228 		else
   13229 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13230 		if (hsfsts & HSFSTS_DONE)
   13231 			break;
   13232 		delay(1);
   13233 		i++;
   13234 	} while (i < timeout);
   13235 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13236 		error = 0;
   13237 
   13238 	return error;
   13239 }
   13240 
   13241 /******************************************************************************
   13242  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13243  *
   13244  * sc - The pointer to the hw structure
   13245  * index - The index of the byte or word to read.
   13246  * size - Size of data to read, 1=byte 2=word, 4=dword
   13247  * data - Pointer to the word to store the value read.
   13248  *****************************************************************************/
   13249 static int32_t
   13250 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13251     uint32_t size, uint32_t *data)
   13252 {
   13253 	uint16_t hsfsts;
   13254 	uint16_t hsflctl;
   13255 	uint32_t flash_linear_address;
   13256 	uint32_t flash_data = 0;
   13257 	int32_t error = 1;
   13258 	int32_t count = 0;
   13259 
    13260 	if (size < 1 || size > 4 || data == NULL ||
   13261 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13262 		return error;
   13263 
   13264 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13265 	    sc->sc_ich8_flash_base;
   13266 
   13267 	do {
   13268 		delay(1);
    13269 		/* Step 1: get the flash controller ready for a new cycle. */
   13270 		error = wm_ich8_cycle_init(sc);
   13271 		if (error)
   13272 			break;
   13273 
   13274 		if (sc->sc_type >= WM_T_PCH_SPT)
   13275 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13276 			    >> 16;
   13277 		else
   13278 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13279 		/* The byte count field holds size - 1 (0=1 byte ... 3=4 bytes). */
   13280 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13281 		    & HSFCTL_BCOUNT_MASK;
   13282 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13283 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13284 			/*
    13285 			 * In SPT, this register is in LAN memory space, not
    13286 			 * flash.  Therefore, only 32-bit access is supported.
   13287 			 */
   13288 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13289 			    (uint32_t)hsflctl << 16);
   13290 		} else
   13291 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13292 
   13293 		/*
   13294 		 * Write the last 24 bits of index into Flash Linear address
   13295 		 * field in Flash Address
   13296 		 */
   13297 		/* TODO: TBD maybe check the index against the size of flash */
   13298 
   13299 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13300 
   13301 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13302 
    13303 		/*
    13304 		 * If FCERR is set, clear it and retry the whole sequence
    13305 		 * a few more times; otherwise read the data out of Flash
    13306 		 * Data0, which returns the bytes least significant byte
    13307 		 * first.
    13308 		 */
   13309 		if (error == 0) {
   13310 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13311 			if (size == 1)
   13312 				*data = (uint8_t)(flash_data & 0x000000FF);
   13313 			else if (size == 2)
   13314 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13315 			else if (size == 4)
   13316 				*data = (uint32_t)flash_data;
   13317 			break;
   13318 		} else {
   13319 			/*
   13320 			 * If we've gotten here, then things are probably
   13321 			 * completely hosed, but if the error condition is
   13322 			 * detected, it won't hurt to give it another try...
   13323 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13324 			 */
   13325 			if (sc->sc_type >= WM_T_PCH_SPT)
   13326 				hsfsts = ICH8_FLASH_READ32(sc,
   13327 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13328 			else
   13329 				hsfsts = ICH8_FLASH_READ16(sc,
   13330 				    ICH_FLASH_HSFSTS);
   13331 
   13332 			if (hsfsts & HSFSTS_ERR) {
   13333 				/* Repeat for some time before giving up. */
   13334 				continue;
   13335 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13336 				break;
   13337 		}
   13338 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13339 
   13340 	return error;
   13341 }
   13342 
   13343 /******************************************************************************
   13344  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13345  *
   13346  * sc - pointer to wm_hw structure
   13347  * index - The index of the byte to read.
   13348  * data - Pointer to a byte to store the value read.
   13349  *****************************************************************************/
   13350 static int32_t
   13351 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13352 {
   13353 	int32_t status;
   13354 	uint32_t word = 0;
   13355 
   13356 	status = wm_read_ich8_data(sc, index, 1, &word);
   13357 	if (status == 0)
   13358 		*data = (uint8_t)word;
   13359 	else
   13360 		*data = 0;
   13361 
   13362 	return status;
   13363 }
   13364 
   13365 /******************************************************************************
   13366  * Reads a word from the NVM using the ICH8 flash access registers.
   13367  *
   13368  * sc - pointer to wm_hw structure
   13369  * index - The starting byte index of the word to read.
   13370  * data - Pointer to a word to store the value read.
   13371  *****************************************************************************/
   13372 static int32_t
   13373 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13374 {
   13375 	int32_t status;
   13376 	uint32_t word = 0;
   13377 
   13378 	status = wm_read_ich8_data(sc, index, 2, &word);
   13379 	if (status == 0)
   13380 		*data = (uint16_t)word;
   13381 	else
   13382 		*data = 0;
   13383 
   13384 	return status;
   13385 }
   13386 
   13387 /******************************************************************************
   13388  * Reads a dword from the NVM using the ICH8 flash access registers.
   13389  *
   13390  * sc - pointer to wm_hw structure
   13391  * index - The starting byte index of the word to read.
   13392  * data - Pointer to a word to store the value read.
   13393  *****************************************************************************/
   13394 static int32_t
   13395 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13396 {
   13397 	int32_t status;
   13398 
   13399 	status = wm_read_ich8_data(sc, index, 4, data);
   13400 	return status;
   13401 }
   13402 
   13403 /******************************************************************************
   13404  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13405  * register.
   13406  *
   13407  * sc - Struct containing variables accessed by shared code
   13408  * offset - offset of word in the EEPROM to read
   13409  * data - word read from the EEPROM
   13410  * words - number of words to read
   13411  *****************************************************************************/
   13412 static int
   13413 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13414 {
   13415 	int32_t	 rv = 0;
   13416 	uint32_t flash_bank = 0;
   13417 	uint32_t act_offset = 0;
   13418 	uint32_t bank_offset = 0;
   13419 	uint16_t word = 0;
   13420 	uint16_t i = 0;
   13421 
   13422 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13423 		device_xname(sc->sc_dev), __func__));
   13424 
   13425 	if (sc->nvm.acquire(sc) != 0)
   13426 		return -1;
   13427 
   13428 	/*
   13429 	 * We need to know which is the valid flash bank.  In the event
   13430 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13431 	 * managing flash_bank. So it cannot be trusted and needs
   13432 	 * to be updated with each read.
   13433 	 */
   13434 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13435 	if (rv) {
   13436 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13437 			device_xname(sc->sc_dev)));
   13438 		flash_bank = 0;
   13439 	}
   13440 
   13441 	/*
   13442 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13443 	 * size
   13444 	 */
   13445 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13446 
   13447 	for (i = 0; i < words; i++) {
   13448 		/* The NVM part needs a byte offset, hence * 2 */
   13449 		act_offset = bank_offset + ((offset + i) * 2);
   13450 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13451 		if (rv) {
   13452 			aprint_error_dev(sc->sc_dev,
   13453 			    "%s: failed to read NVM\n", __func__);
   13454 			break;
   13455 		}
   13456 		data[i] = word;
   13457 	}
   13458 
   13459 	sc->nvm.release(sc);
   13460 	return rv;
   13461 }
   13462 
   13463 /******************************************************************************
   13464  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13465  * register.
   13466  *
   13467  * sc - Struct containing variables accessed by shared code
   13468  * offset - offset of word in the EEPROM to read
   13469  * data - word read from the EEPROM
   13470  * words - number of words to read
   13471  *****************************************************************************/
   13472 static int
   13473 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13474 {
   13475 	int32_t	 rv = 0;
   13476 	uint32_t flash_bank = 0;
   13477 	uint32_t act_offset = 0;
   13478 	uint32_t bank_offset = 0;
   13479 	uint32_t dword = 0;
   13480 	uint16_t i = 0;
   13481 
   13482 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13483 		device_xname(sc->sc_dev), __func__));
   13484 
   13485 	if (sc->nvm.acquire(sc) != 0)
   13486 		return -1;
   13487 
   13488 	/*
   13489 	 * We need to know which is the valid flash bank.  In the event
   13490 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13491 	 * managing flash_bank. So it cannot be trusted and needs
   13492 	 * to be updated with each read.
   13493 	 */
   13494 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13495 	if (rv) {
   13496 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13497 			device_xname(sc->sc_dev)));
   13498 		flash_bank = 0;
   13499 	}
   13500 
   13501 	/*
   13502 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13503 	 * size
   13504 	 */
   13505 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13506 
   13507 	for (i = 0; i < words; i++) {
   13508 		/* The NVM part needs a byte offset, hence * 2 */
   13509 		act_offset = bank_offset + ((offset + i) * 2);
   13510 		/* but we must read dword aligned, so mask ... */
   13511 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13512 		if (rv) {
   13513 			aprint_error_dev(sc->sc_dev,
   13514 			    "%s: failed to read NVM\n", __func__);
   13515 			break;
   13516 		}
   13517 		/* ... and pick out low or high word */
   13518 		if ((act_offset & 0x2) == 0)
   13519 			data[i] = (uint16_t)(dword & 0xFFFF);
   13520 		else
   13521 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13522 	}
   13523 
   13524 	sc->nvm.release(sc);
   13525 	return rv;
   13526 }
   13527 
   13528 /* iNVM */
   13529 
   13530 static int
   13531 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13532 {
    13533 	int32_t	 rv = -1;	/* Fail unless the word is found */
   13534 	uint32_t invm_dword;
   13535 	uint16_t i;
   13536 	uint8_t record_type, word_address;
   13537 
   13538 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13539 		device_xname(sc->sc_dev), __func__));
   13540 
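          	/*
          	 * Walk the iNVM dword array, skipping over CSR-autoload and
          	 * RSA-key records, until the word-autoload record for this
          	 * address is found.
          	 */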
   13541 	for (i = 0; i < INVM_SIZE; i++) {
   13542 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13543 		/* Get record type */
   13544 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13545 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13546 			break;
   13547 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13548 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13549 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13550 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13551 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13552 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13553 			if (word_address == address) {
   13554 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13555 				rv = 0;
   13556 				break;
   13557 			}
   13558 		}
   13559 	}
   13560 
   13561 	return rv;
   13562 }
   13563 
   13564 static int
   13565 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13566 {
   13567 	int rv = 0;
   13568 	int i;
   13569 
   13570 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13571 		device_xname(sc->sc_dev), __func__));
   13572 
   13573 	if (sc->nvm.acquire(sc) != 0)
   13574 		return -1;
   13575 
   13576 	for (i = 0; i < words; i++) {
   13577 		switch (offset + i) {
   13578 		case NVM_OFF_MACADDR:
   13579 		case NVM_OFF_MACADDR1:
   13580 		case NVM_OFF_MACADDR2:
   13581 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13582 			if (rv != 0) {
   13583 				data[i] = 0xffff;
   13584 				rv = -1;
   13585 			}
   13586 			break;
   13587 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13588 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13589 			if (rv != 0) {
   13590 				*data = INVM_DEFAULT_AL;
   13591 				rv = 0;
   13592 			}
   13593 			break;
   13594 		case NVM_OFF_CFG2:
   13595 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13596 			if (rv != 0) {
   13597 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13598 				rv = 0;
   13599 			}
   13600 			break;
   13601 		case NVM_OFF_CFG4:
   13602 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13603 			if (rv != 0) {
   13604 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13605 				rv = 0;
   13606 			}
   13607 			break;
   13608 		case NVM_OFF_LED_1_CFG:
   13609 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13610 			if (rv != 0) {
   13611 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13612 				rv = 0;
   13613 			}
   13614 			break;
   13615 		case NVM_OFF_LED_0_2_CFG:
   13616 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13617 			if (rv != 0) {
   13618 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13619 				rv = 0;
   13620 			}
   13621 			break;
   13622 		case NVM_OFF_ID_LED_SETTINGS:
   13623 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13624 			if (rv != 0) {
   13625 				*data = ID_LED_RESERVED_FFFF;
   13626 				rv = 0;
   13627 			}
   13628 			break;
   13629 		default:
   13630 			DPRINTF(WM_DEBUG_NVM,
   13631 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13632 			*data = NVM_RESERVED_WORD;
   13633 			break;
   13634 		}
   13635 	}
   13636 
   13637 	sc->nvm.release(sc);
   13638 	return rv;
   13639 }
   13640 
   13641 /* Lock, detecting NVM type, validate checksum, version and read */
   13642 
   13643 static int
   13644 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13645 {
   13646 	uint32_t eecd = 0;
   13647 
   13648 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13649 	    || sc->sc_type == WM_T_82583) {
   13650 		eecd = CSR_READ(sc, WMREG_EECD);
   13651 
   13652 		/* Isolate bits 15 & 16 */
   13653 		eecd = ((eecd >> 15) & 0x03);
   13654 
   13655 		/* If both bits are set, device is Flash type */
   13656 		if (eecd == 0x03)
   13657 			return 0;
   13658 	}
   13659 	return 1;
   13660 }
   13661 
   13662 static int
   13663 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13664 {
   13665 	uint32_t eec;
   13666 
   13667 	eec = CSR_READ(sc, WMREG_EEC);
   13668 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13669 		return 1;
   13670 
   13671 	return 0;
   13672 }
   13673 
   13674 /*
   13675  * wm_nvm_validate_checksum
   13676  *
    13677  * The first NVM_SIZE (64) 16-bit words must sum to NVM_CHECKSUM.
   13678  */
   13679 static int
   13680 wm_nvm_validate_checksum(struct wm_softc *sc)
   13681 {
   13682 	uint16_t checksum;
   13683 	uint16_t eeprom_data;
   13684 #ifdef WM_DEBUG
   13685 	uint16_t csum_wordaddr, valid_checksum;
   13686 #endif
   13687 	int i;
   13688 
   13689 	checksum = 0;
   13690 
   13691 	/* Don't check for I211 */
   13692 	if (sc->sc_type == WM_T_I211)
   13693 		return 0;
   13694 
   13695 #ifdef WM_DEBUG
   13696 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13697 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13698 		csum_wordaddr = NVM_OFF_COMPAT;
   13699 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13700 	} else {
   13701 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13702 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13703 	}
   13704 
   13705 	/* Dump EEPROM image for debug */
   13706 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13707 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13708 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13709 		/* XXX PCH_SPT? */
   13710 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13711 		if ((eeprom_data & valid_checksum) == 0)
   13712 			DPRINTF(WM_DEBUG_NVM,
   13713 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13714 				device_xname(sc->sc_dev), eeprom_data,
   13715 				    valid_checksum));
   13716 	}
   13717 
   13718 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13719 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13720 		for (i = 0; i < NVM_SIZE; i++) {
   13721 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13722 				printf("XXXX ");
   13723 			else
   13724 				printf("%04hx ", eeprom_data);
   13725 			if (i % 8 == 7)
   13726 				printf("\n");
   13727 		}
   13728 	}
   13729 
   13730 #endif /* WM_DEBUG */
   13731 
   13732 	for (i = 0; i < NVM_SIZE; i++) {
   13733 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13734 			return 1;
   13735 		checksum += eeprom_data;
   13736 	}
   13737 
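          	/* A mismatch is only reported; it is not treated as fatal. */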
   13738 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13739 #ifdef WM_DEBUG
   13740 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13741 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13742 #endif
   13743 	}
   13744 
   13745 	return 0;
   13746 }
   13747 
   13748 static void
   13749 wm_nvm_version_invm(struct wm_softc *sc)
   13750 {
   13751 	uint32_t dword;
   13752 
   13753 	/*
    13754 	 * Linux's code to decode the version is very strange, so we don't
    13755 	 * follow that algorithm and just use word 61 as the documentation
    13756 	 * describes.  Perhaps it's not perfect, though...
   13757 	 *
   13758 	 * Example:
   13759 	 *
   13760 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13761 	 */
   13762 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13763 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13764 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13765 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13766 }
   13767 
   13768 static void
   13769 wm_nvm_version(struct wm_softc *sc)
   13770 {
   13771 	uint16_t major, minor, build, patch;
   13772 	uint16_t uid0, uid1;
   13773 	uint16_t nvm_data;
   13774 	uint16_t off;
   13775 	bool check_version = false;
   13776 	bool check_optionrom = false;
   13777 	bool have_build = false;
   13778 	bool have_uid = true;
   13779 
   13780 	/*
   13781 	 * Version format:
   13782 	 *
   13783 	 * XYYZ
   13784 	 * X0YZ
   13785 	 * X0YY
   13786 	 *
   13787 	 * Example:
   13788 	 *
   13789 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13790 	 *	82571	0x50a6	5.10.6?
   13791 	 *	82572	0x506a	5.6.10?
   13792 	 *	82572EI	0x5069	5.6.9?
   13793 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13794 	 *		0x2013	2.1.3?
   13795 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13796 	 * ICH8+82567	0x0040	0.4.0?
   13797 	 * ICH9+82566	0x1040	1.4.0?
   13798 	 *ICH10+82567	0x0043	0.4.3?
   13799 	 *  PCH+82577	0x00c1	0.12.1?
   13800 	 * PCH2+82579	0x00d3	0.13.3?
   13801 	 *		0x00d4	0.13.4?
   13802 	 *  LPT+I218	0x0023	0.2.3?
   13803 	 *  SPT+I219	0x0084	0.8.4?
   13804 	 *  CNP+I219	0x0054	0.5.4?
   13805 	 */
   13806 
   13807 	/*
   13808 	 * XXX
    13809 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13810 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13811 	 */
   13812 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13813 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13814 		have_uid = false;
   13815 
   13816 	switch (sc->sc_type) {
   13817 	case WM_T_82571:
   13818 	case WM_T_82572:
   13819 	case WM_T_82574:
   13820 	case WM_T_82583:
   13821 		check_version = true;
   13822 		check_optionrom = true;
   13823 		have_build = true;
   13824 		break;
   13825 	case WM_T_ICH8:
   13826 	case WM_T_ICH9:
   13827 	case WM_T_ICH10:
   13828 	case WM_T_PCH:
   13829 	case WM_T_PCH2:
   13830 	case WM_T_PCH_LPT:
   13831 	case WM_T_PCH_SPT:
   13832 	case WM_T_PCH_CNP:
   13833 		check_version = true;
   13834 		have_build = true;
   13835 		have_uid = false;
   13836 		break;
   13837 	case WM_T_82575:
   13838 	case WM_T_82576:
   13839 	case WM_T_82580:
   13840 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13841 			check_version = true;
   13842 		break;
   13843 	case WM_T_I211:
   13844 		wm_nvm_version_invm(sc);
   13845 		have_uid = false;
   13846 		goto printver;
   13847 	case WM_T_I210:
   13848 		if (!wm_nvm_flash_presence_i210(sc)) {
   13849 			wm_nvm_version_invm(sc);
   13850 			have_uid = false;
   13851 			goto printver;
   13852 		}
   13853 		/* FALLTHROUGH */
   13854 	case WM_T_I350:
   13855 	case WM_T_I354:
   13856 		check_version = true;
   13857 		check_optionrom = true;
   13858 		break;
   13859 	default:
   13860 		return;
   13861 	}
   13862 	if (check_version
   13863 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13864 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13865 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13866 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13867 			build = nvm_data & NVM_BUILD_MASK;
   13868 			have_build = true;
   13869 		} else
   13870 			minor = nvm_data & 0x00ff;
   13871 
    13872 		/* The minor number is stored in BCD; convert to decimal */
   13873 		minor = (minor / 16) * 10 + (minor % 16);
   13874 		sc->sc_nvm_ver_major = major;
   13875 		sc->sc_nvm_ver_minor = minor;
   13876 
   13877 printver:
   13878 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13879 		    sc->sc_nvm_ver_minor);
   13880 		if (have_build) {
   13881 			sc->sc_nvm_ver_build = build;
   13882 			aprint_verbose(".%d", build);
   13883 		}
   13884 	}
   13885 
    13886 	/* Assume the Option ROM area is above NVM_SIZE */
   13887 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13888 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13889 		/* Option ROM Version */
   13890 		if ((off != 0x0000) && (off != 0xffff)) {
   13891 			int rv;
   13892 
   13893 			off += NVM_COMBO_VER_OFF;
   13894 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13895 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13896 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13897 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13898 				/* 16bits */
   13899 				major = uid0 >> 8;
   13900 				build = (uid0 << 8) | (uid1 >> 8);
   13901 				patch = uid1 & 0x00ff;
   13902 				aprint_verbose(", option ROM Version %d.%d.%d",
   13903 				    major, build, patch);
   13904 			}
   13905 		}
   13906 	}
   13907 
   13908 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13909 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13910 }
   13911 
   13912 /*
   13913  * wm_nvm_read:
   13914  *
   13915  *	Read data from the serial EEPROM.
   13916  */
   13917 static int
   13918 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13919 {
   13920 	int rv;
   13921 
   13922 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13923 		device_xname(sc->sc_dev), __func__));
   13924 
   13925 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13926 		return -1;
   13927 
   13928 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13929 
   13930 	return rv;
   13931 }
   13932 
   13933 /*
   13934  * Hardware semaphores.
    13935  * Very complex...
   13936  */
   13937 
   13938 static int
   13939 wm_get_null(struct wm_softc *sc)
   13940 {
   13941 
   13942 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13943 		device_xname(sc->sc_dev), __func__));
   13944 	return 0;
   13945 }
   13946 
   13947 static void
   13948 wm_put_null(struct wm_softc *sc)
   13949 {
   13950 
   13951 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13952 		device_xname(sc->sc_dev), __func__));
   13953 	return;
   13954 }
   13955 
   13956 static int
   13957 wm_get_eecd(struct wm_softc *sc)
   13958 {
   13959 	uint32_t reg;
   13960 	int x;
   13961 
   13962 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13963 		device_xname(sc->sc_dev), __func__));
   13964 
   13965 	reg = CSR_READ(sc, WMREG_EECD);
   13966 
   13967 	/* Request EEPROM access. */
   13968 	reg |= EECD_EE_REQ;
   13969 	CSR_WRITE(sc, WMREG_EECD, reg);
   13970 
   13971 	/* ..and wait for it to be granted. */
   13972 	for (x = 0; x < 1000; x++) {
   13973 		reg = CSR_READ(sc, WMREG_EECD);
   13974 		if (reg & EECD_EE_GNT)
   13975 			break;
   13976 		delay(5);
   13977 	}
   13978 	if ((reg & EECD_EE_GNT) == 0) {
   13979 		aprint_error_dev(sc->sc_dev,
   13980 		    "could not acquire EEPROM GNT\n");
   13981 		reg &= ~EECD_EE_REQ;
   13982 		CSR_WRITE(sc, WMREG_EECD, reg);
   13983 		return -1;
   13984 	}
   13985 
   13986 	return 0;
   13987 }
   13988 
   13989 static void
   13990 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13991 {
   13992 
   13993 	*eecd |= EECD_SK;
   13994 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13995 	CSR_WRITE_FLUSH(sc);
   13996 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13997 		delay(1);
   13998 	else
   13999 		delay(50);
   14000 }
   14001 
   14002 static void
   14003 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14004 {
   14005 
   14006 	*eecd &= ~EECD_SK;
   14007 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14008 	CSR_WRITE_FLUSH(sc);
   14009 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14010 		delay(1);
   14011 	else
   14012 		delay(50);
   14013 }
   14014 
   14015 static void
   14016 wm_put_eecd(struct wm_softc *sc)
   14017 {
   14018 	uint32_t reg;
   14019 
   14020 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14021 		device_xname(sc->sc_dev), __func__));
   14022 
   14023 	/* Stop nvm */
   14024 	reg = CSR_READ(sc, WMREG_EECD);
   14025 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14026 		/* Pull CS high */
   14027 		reg |= EECD_CS;
   14028 		wm_nvm_eec_clock_lower(sc, &reg);
   14029 	} else {
   14030 		/* CS on Microwire is active-high */
   14031 		reg &= ~(EECD_CS | EECD_DI);
   14032 		CSR_WRITE(sc, WMREG_EECD, reg);
   14033 		wm_nvm_eec_clock_raise(sc, &reg);
   14034 		wm_nvm_eec_clock_lower(sc, &reg);
   14035 	}
   14036 
   14037 	reg = CSR_READ(sc, WMREG_EECD);
   14038 	reg &= ~EECD_EE_REQ;
   14039 	CSR_WRITE(sc, WMREG_EECD, reg);
   14040 
   14041 	return;
   14042 }
   14043 
   14044 /*
   14045  * Get hardware semaphore.
   14046  * Same as e1000_get_hw_semaphore_generic()
   14047  */
   14048 static int
   14049 wm_get_swsm_semaphore(struct wm_softc *sc)
   14050 {
   14051 	int32_t timeout;
   14052 	uint32_t swsm;
   14053 
   14054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14055 		device_xname(sc->sc_dev), __func__));
   14056 	KASSERT(sc->sc_nvm_wordsize > 0);
   14057 
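          	/*
          	 * Acquisition is two stage: first take the SMBI bit (between
          	 * software agents), then the SWESMBI bit (between software and
          	 * firmware).
          	 */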
   14058 retry:
   14059 	/* Get the SW semaphore. */
   14060 	timeout = sc->sc_nvm_wordsize + 1;
   14061 	while (timeout) {
   14062 		swsm = CSR_READ(sc, WMREG_SWSM);
   14063 
   14064 		if ((swsm & SWSM_SMBI) == 0)
   14065 			break;
   14066 
   14067 		delay(50);
   14068 		timeout--;
   14069 	}
   14070 
   14071 	if (timeout == 0) {
   14072 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14073 			/*
   14074 			 * In rare circumstances, the SW semaphore may already
   14075 			 * be held unintentionally. Clear the semaphore once
   14076 			 * before giving up.
   14077 			 */
   14078 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14079 			wm_put_swsm_semaphore(sc);
   14080 			goto retry;
   14081 		}
   14082 		aprint_error_dev(sc->sc_dev,
   14083 		    "could not acquire SWSM SMBI\n");
   14084 		return 1;
   14085 	}
   14086 
   14087 	/* Get the FW semaphore. */
   14088 	timeout = sc->sc_nvm_wordsize + 1;
   14089 	while (timeout) {
   14090 		swsm = CSR_READ(sc, WMREG_SWSM);
   14091 		swsm |= SWSM_SWESMBI;
   14092 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14093 		/* If we managed to set the bit we got the semaphore. */
   14094 		swsm = CSR_READ(sc, WMREG_SWSM);
   14095 		if (swsm & SWSM_SWESMBI)
   14096 			break;
   14097 
   14098 		delay(50);
   14099 		timeout--;
   14100 	}
   14101 
   14102 	if (timeout == 0) {
   14103 		aprint_error_dev(sc->sc_dev,
   14104 		    "could not acquire SWSM SWESMBI\n");
   14105 		/* Release semaphores */
   14106 		wm_put_swsm_semaphore(sc);
   14107 		return 1;
   14108 	}
   14109 	return 0;
   14110 }
   14111 
   14112 /*
   14113  * Put hardware semaphore.
   14114  * Same as e1000_put_hw_semaphore_generic()
   14115  */
   14116 static void
   14117 wm_put_swsm_semaphore(struct wm_softc *sc)
   14118 {
   14119 	uint32_t swsm;
   14120 
   14121 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14122 		device_xname(sc->sc_dev), __func__));
   14123 
   14124 	swsm = CSR_READ(sc, WMREG_SWSM);
   14125 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14126 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14127 }
   14128 
   14129 /*
   14130  * Get SW/FW semaphore.
   14131  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14132  */
   14133 static int
   14134 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14135 {
   14136 	uint32_t swfw_sync;
   14137 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14138 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14139 	int timeout;
   14140 
   14141 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14142 		device_xname(sc->sc_dev), __func__));
   14143 
   14144 	if (sc->sc_type == WM_T_80003)
   14145 		timeout = 50;
   14146 	else
   14147 		timeout = 200;
   14148 
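          	/*
          	 * SW_FW_SYNC holds software flags in its low half and firmware
          	 * flags in its high half; take the software flag only when both
          	 * flags for this resource are clear.
          	 */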
   14149 	while (timeout) {
   14150 		if (wm_get_swsm_semaphore(sc)) {
   14151 			aprint_error_dev(sc->sc_dev,
   14152 			    "%s: failed to get semaphore\n",
   14153 			    __func__);
   14154 			return 1;
   14155 		}
   14156 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14157 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14158 			swfw_sync |= swmask;
   14159 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14160 			wm_put_swsm_semaphore(sc);
   14161 			return 0;
   14162 		}
   14163 		wm_put_swsm_semaphore(sc);
   14164 		delay(5000);
   14165 		timeout--;
   14166 	}
   14167 	device_printf(sc->sc_dev,
   14168 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14169 	    mask, swfw_sync);
   14170 	return 1;
   14171 }
   14172 
   14173 static void
   14174 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14175 {
   14176 	uint32_t swfw_sync;
   14177 
   14178 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14179 		device_xname(sc->sc_dev), __func__));
   14180 
   14181 	while (wm_get_swsm_semaphore(sc) != 0)
   14182 		continue;
   14183 
   14184 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14185 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14186 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14187 
   14188 	wm_put_swsm_semaphore(sc);
   14189 }
   14190 
   14191 static int
   14192 wm_get_nvm_80003(struct wm_softc *sc)
   14193 {
   14194 	int rv;
   14195 
   14196 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14197 		device_xname(sc->sc_dev), __func__));
   14198 
   14199 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14200 		aprint_error_dev(sc->sc_dev,
   14201 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14202 		return rv;
   14203 	}
   14204 
   14205 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14206 	    && (rv = wm_get_eecd(sc)) != 0) {
   14207 		aprint_error_dev(sc->sc_dev,
   14208 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14209 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14210 		return rv;
   14211 	}
   14212 
   14213 	return 0;
   14214 }
   14215 
   14216 static void
   14217 wm_put_nvm_80003(struct wm_softc *sc)
   14218 {
   14219 
   14220 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14221 		device_xname(sc->sc_dev), __func__));
   14222 
   14223 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14224 		wm_put_eecd(sc);
   14225 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14226 }
   14227 
   14228 static int
   14229 wm_get_nvm_82571(struct wm_softc *sc)
   14230 {
   14231 	int rv;
   14232 
   14233 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14234 		device_xname(sc->sc_dev), __func__));
   14235 
   14236 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14237 		return rv;
   14238 
   14239 	switch (sc->sc_type) {
   14240 	case WM_T_82573:
   14241 		break;
   14242 	default:
   14243 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14244 			rv = wm_get_eecd(sc);
   14245 		break;
   14246 	}
   14247 
   14248 	if (rv != 0) {
   14249 		aprint_error_dev(sc->sc_dev,
   14250 		    "%s: failed to get semaphore\n",
   14251 		    __func__);
   14252 		wm_put_swsm_semaphore(sc);
   14253 	}
   14254 
   14255 	return rv;
   14256 }
   14257 
   14258 static void
   14259 wm_put_nvm_82571(struct wm_softc *sc)
   14260 {
   14261 
   14262 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14263 		device_xname(sc->sc_dev), __func__));
   14264 
   14265 	switch (sc->sc_type) {
   14266 	case WM_T_82573:
   14267 		break;
   14268 	default:
   14269 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14270 			wm_put_eecd(sc);
   14271 		break;
   14272 	}
   14273 
   14274 	wm_put_swsm_semaphore(sc);
   14275 }
   14276 
   14277 static int
   14278 wm_get_phy_82575(struct wm_softc *sc)
   14279 {
   14280 
   14281 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14282 		device_xname(sc->sc_dev), __func__));
   14283 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14284 }
   14285 
   14286 static void
   14287 wm_put_phy_82575(struct wm_softc *sc)
   14288 {
   14289 
   14290 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14291 		device_xname(sc->sc_dev), __func__));
   14292 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14293 }
   14294 
   14295 static int
   14296 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14297 {
   14298 	uint32_t ext_ctrl;
   14299 	int timeout = 200;
   14300 
   14301 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14302 		device_xname(sc->sc_dev), __func__));
   14303 
   14304 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
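          	/*
          	 * Claim ownership by setting the SW ownership bit and reading
          	 * it back; the bit reads back clear while firmware holds the
          	 * resource.
          	 */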
   14305 	for (timeout = 0; timeout < 200; timeout++) {
   14306 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14307 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14308 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14309 
   14310 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14311 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14312 			return 0;
   14313 		delay(5000);
   14314 	}
   14315 	device_printf(sc->sc_dev,
   14316 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14317 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14318 	return 1;
   14319 }
   14320 
   14321 static void
   14322 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14323 {
   14324 	uint32_t ext_ctrl;
   14325 
   14326 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14327 		device_xname(sc->sc_dev), __func__));
   14328 
   14329 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14330 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14331 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14332 
   14333 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14334 }
   14335 
   14336 static int
   14337 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14338 {
   14339 	uint32_t ext_ctrl;
   14340 	int timeout;
   14341 
   14342 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14343 		device_xname(sc->sc_dev), __func__));
   14344 	mutex_enter(sc->sc_ich_phymtx);
   14345 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14346 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14347 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14348 			break;
   14349 		delay(1000);
   14350 	}
   14351 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14352 		device_printf(sc->sc_dev,
   14353 		    "SW has already locked the resource\n");
   14354 		goto out;
   14355 	}
   14356 
   14357 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14358 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14359 	for (timeout = 0; timeout < 1000; timeout++) {
   14360 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14361 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14362 			break;
   14363 		delay(1000);
   14364 	}
   14365 	if (timeout >= 1000) {
   14366 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14367 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14368 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14369 		goto out;
   14370 	}
   14371 	return 0;
   14372 
   14373 out:
   14374 	mutex_exit(sc->sc_ich_phymtx);
   14375 	return 1;
   14376 }
   14377 
   14378 static void
   14379 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14380 {
   14381 	uint32_t ext_ctrl;
   14382 
   14383 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14384 		device_xname(sc->sc_dev), __func__));
   14385 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14386 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14387 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14388 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14389 	} else {
   14390 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14391 	}
   14392 
   14393 	mutex_exit(sc->sc_ich_phymtx);
   14394 }
   14395 
   14396 static int
   14397 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14398 {
   14399 
   14400 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14401 		device_xname(sc->sc_dev), __func__));
   14402 	mutex_enter(sc->sc_ich_nvmmtx);
   14403 
   14404 	return 0;
   14405 }
   14406 
   14407 static void
   14408 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14409 {
   14410 
   14411 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14412 		device_xname(sc->sc_dev), __func__));
   14413 	mutex_exit(sc->sc_ich_nvmmtx);
   14414 }
   14415 
   14416 static int
   14417 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14418 {
   14419 	int i = 0;
   14420 	uint32_t reg;
   14421 
   14422 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14423 		device_xname(sc->sc_dev), __func__));
   14424 
   14425 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14426 	do {
   14427 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14428 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14429 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14430 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14431 			break;
   14432 		delay(2*1000);
   14433 		i++;
   14434 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14435 
   14436 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14437 		wm_put_hw_semaphore_82573(sc);
   14438 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14439 		    device_xname(sc->sc_dev));
   14440 		return -1;
   14441 	}
   14442 
   14443 	return 0;
   14444 }
   14445 
   14446 static void
   14447 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14448 {
   14449 	uint32_t reg;
   14450 
   14451 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14452 		device_xname(sc->sc_dev), __func__));
   14453 
   14454 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14455 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14456 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14457 }
   14458 
   14459 /*
   14460  * Management mode and power management related subroutines.
   14461  * BMC, AMT, suspend/resume and EEE.
   14462  */
   14463 
   14464 #ifdef WM_WOL
   14465 static int
   14466 wm_check_mng_mode(struct wm_softc *sc)
   14467 {
   14468 	int rv;
   14469 
   14470 	switch (sc->sc_type) {
   14471 	case WM_T_ICH8:
   14472 	case WM_T_ICH9:
   14473 	case WM_T_ICH10:
   14474 	case WM_T_PCH:
   14475 	case WM_T_PCH2:
   14476 	case WM_T_PCH_LPT:
   14477 	case WM_T_PCH_SPT:
   14478 	case WM_T_PCH_CNP:
   14479 		rv = wm_check_mng_mode_ich8lan(sc);
   14480 		break;
   14481 	case WM_T_82574:
   14482 	case WM_T_82583:
   14483 		rv = wm_check_mng_mode_82574(sc);
   14484 		break;
   14485 	case WM_T_82571:
   14486 	case WM_T_82572:
   14487 	case WM_T_82573:
   14488 	case WM_T_80003:
   14489 		rv = wm_check_mng_mode_generic(sc);
   14490 		break;
   14491 	default:
    14492 		/* Nothing to do */
   14493 		rv = 0;
   14494 		break;
   14495 	}
   14496 
   14497 	return rv;
   14498 }
   14499 
   14500 static int
   14501 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14502 {
   14503 	uint32_t fwsm;
   14504 
   14505 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14506 
   14507 	if (((fwsm & FWSM_FW_VALID) != 0)
   14508 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14509 		return 1;
   14510 
   14511 	return 0;
   14512 }
   14513 
   14514 static int
   14515 wm_check_mng_mode_82574(struct wm_softc *sc)
   14516 {
   14517 	uint16_t data;
   14518 
   14519 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14520 
   14521 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14522 		return 1;
   14523 
   14524 	return 0;
   14525 }
   14526 
   14527 static int
   14528 wm_check_mng_mode_generic(struct wm_softc *sc)
   14529 {
   14530 	uint32_t fwsm;
   14531 
   14532 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14533 
   14534 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14535 		return 1;
   14536 
   14537 	return 0;
   14538 }
   14539 #endif /* WM_WOL */
   14540 
   14541 static int
   14542 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14543 {
   14544 	uint32_t manc, fwsm, factps;
   14545 
   14546 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14547 		return 0;
   14548 
   14549 	manc = CSR_READ(sc, WMREG_MANC);
   14550 
   14551 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14552 		device_xname(sc->sc_dev), manc));
   14553 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14554 		return 0;
   14555 
   14556 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14557 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14558 		factps = CSR_READ(sc, WMREG_FACTPS);
   14559 		if (((factps & FACTPS_MNGCG) == 0)
   14560 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14561 			return 1;
    14562 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14563 		uint16_t data;
   14564 
   14565 		factps = CSR_READ(sc, WMREG_FACTPS);
   14566 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14567 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14568 			device_xname(sc->sc_dev), factps, data));
   14569 		if (((factps & FACTPS_MNGCG) == 0)
   14570 		    && ((data & NVM_CFG2_MNGM_MASK)
   14571 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14572 			return 1;
   14573 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14574 	    && ((manc & MANC_ASF_EN) == 0))
   14575 		return 1;
   14576 
   14577 	return 0;
   14578 }
   14579 
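          /*
           * Check whether the firmware is currently blocking PHY resets.
           * On ICH/PCH devices this polls FWSM_RSPCIPHY for up to about
           * 300ms; on 8257x/80003 it checks MANC_BLK_PHY_RST_ON_IDE.
           */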
   14580 static bool
   14581 wm_phy_resetisblocked(struct wm_softc *sc)
   14582 {
   14583 	bool blocked = false;
   14584 	uint32_t reg;
   14585 	int i = 0;
   14586 
   14587 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14588 		device_xname(sc->sc_dev), __func__));
   14589 
   14590 	switch (sc->sc_type) {
   14591 	case WM_T_ICH8:
   14592 	case WM_T_ICH9:
   14593 	case WM_T_ICH10:
   14594 	case WM_T_PCH:
   14595 	case WM_T_PCH2:
   14596 	case WM_T_PCH_LPT:
   14597 	case WM_T_PCH_SPT:
   14598 	case WM_T_PCH_CNP:
   14599 		do {
   14600 			reg = CSR_READ(sc, WMREG_FWSM);
   14601 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14602 				blocked = true;
   14603 				delay(10*1000);
   14604 				continue;
   14605 			}
   14606 			blocked = false;
   14607 		} while (blocked && (i++ < 30));
   14608 		return blocked;
   14610 	case WM_T_82571:
   14611 	case WM_T_82572:
   14612 	case WM_T_82573:
   14613 	case WM_T_82574:
   14614 	case WM_T_82583:
   14615 	case WM_T_80003:
   14616 		reg = CSR_READ(sc, WMREG_MANC);
   14617 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14618 			return true;
   14619 		else
   14620 			return false;
   14622 	default:
   14623 		/* No problem */
   14624 		break;
   14625 	}
   14626 
   14627 	return false;
   14628 }
   14629 
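          /*
           * Set the DRV_LOAD bit to tell the firmware that the driver has
           * taken over the device.
           */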
   14630 static void
   14631 wm_get_hw_control(struct wm_softc *sc)
   14632 {
   14633 	uint32_t reg;
   14634 
   14635 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14636 		device_xname(sc->sc_dev), __func__));
   14637 
   14638 	if (sc->sc_type == WM_T_82573) {
   14639 		reg = CSR_READ(sc, WMREG_SWSM);
   14640 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14641 	} else if (sc->sc_type >= WM_T_82571) {
   14642 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14643 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14644 	}
   14645 }
   14646 
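          /*
           * Clear the DRV_LOAD bit to return control of the device to the
           * firmware.
           */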
   14647 static void
   14648 wm_release_hw_control(struct wm_softc *sc)
   14649 {
   14650 	uint32_t reg;
   14651 
   14652 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14653 		device_xname(sc->sc_dev), __func__));
   14654 
   14655 	if (sc->sc_type == WM_T_82573) {
   14656 		reg = CSR_READ(sc, WMREG_SWSM);
   14657 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14658 	} else if (sc->sc_type >= WM_T_82571) {
   14659 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14660 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14661 	}
   14662 }
   14663 
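          /*
           * Gate or ungate automatic PHY configuration by hardware (PCH2
           * and newer).
           */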
   14664 static void
   14665 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14666 {
   14667 	uint32_t reg;
   14668 
   14669 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14670 		device_xname(sc->sc_dev), __func__));
   14671 
   14672 	if (sc->sc_type < WM_T_PCH2)
   14673 		return;
   14674 
   14675 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14676 
   14677 	if (gate)
   14678 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14679 	else
   14680 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14681 
   14682 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14683 }
   14684 
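          /*
           * Make sure the PHY is accessible after a reset or power state
           * transition: force the MAC-PHY interconnect out of SMBus mode
           * and toggle LANPHYPC if needed, then reset the PHY into a known
           * good state.
           */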
   14685 static int
   14686 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14687 {
   14688 	uint32_t fwsm, reg;
   14689 	int rv = 0;
   14690 
   14691 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14692 		device_xname(sc->sc_dev), __func__));
   14693 
   14694 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14695 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14696 
   14697 	/* Disable ULP */
   14698 	wm_ulp_disable(sc);
   14699 
   14700 	/* Acquire PHY semaphore */
   14701 	rv = sc->phy.acquire(sc);
   14702 	if (rv != 0) {
   14703 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14704 		device_xname(sc->sc_dev), __func__));
   14705 		return -1;
   14706 	}
   14707 
   14708 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14709 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14710 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14711 	 */
   14712 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14713 	switch (sc->sc_type) {
   14714 	case WM_T_PCH_LPT:
   14715 	case WM_T_PCH_SPT:
   14716 	case WM_T_PCH_CNP:
   14717 		if (wm_phy_is_accessible_pchlan(sc))
   14718 			break;
   14719 
   14720 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14721 		 * forcing MAC to SMBus mode first.
   14722 		 */
   14723 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14724 		reg |= CTRL_EXT_FORCE_SMBUS;
   14725 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14726 #if 0
   14727 		/* XXX Isn't this required??? */
   14728 		CSR_WRITE_FLUSH(sc);
   14729 #endif
   14730 		/* Wait 50 milliseconds for MAC to finish any retries
   14731 		 * that it might be trying to perform from previous
   14732 		 * attempts to acknowledge any phy read requests.
   14733 		 */
   14734 		delay(50 * 1000);
   14735 		/* FALLTHROUGH */
   14736 	case WM_T_PCH2:
   14737 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14738 			break;
   14739 		/* FALLTHROUGH */
   14740 	case WM_T_PCH:
   14741 		if (sc->sc_type == WM_T_PCH)
   14742 			if ((fwsm & FWSM_FW_VALID) != 0)
   14743 				break;
   14744 
   14745 		if (wm_phy_resetisblocked(sc) == true) {
   14746 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14747 			break;
   14748 		}
   14749 
   14750 		/* Toggle LANPHYPC Value bit */
   14751 		wm_toggle_lanphypc_pch_lpt(sc);
   14752 
   14753 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14754 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14755 				break;
   14756 
   14757 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14758 			 * so ensure that the MAC is also out of SMBus mode
   14759 			 */
   14760 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14761 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14762 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14763 
   14764 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14765 				break;
   14766 			rv = -1;
   14767 		}
   14768 		break;
   14769 	default:
   14770 		break;
   14771 	}
   14772 
   14773 	/* Release semaphore */
   14774 	sc->phy.release(sc);
   14775 
   14776 	if (rv == 0) {
   14777 		/* Check to see if able to reset PHY.  Print error if not */
   14778 		if (wm_phy_resetisblocked(sc)) {
   14779 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14780 			goto out;
   14781 		}
   14782 
   14783 		/* Reset the PHY before any access to it.  Doing so, ensures
   14784 		 * that the PHY is in a known good state before we read/write
   14785 		 * PHY registers.  The generic reset is sufficient here,
   14786 		 * because we haven't determined the PHY type yet.
   14787 		 */
    14788 		if ((rv = wm_reset_phy(sc)) != 0)
    14789 			goto out;
   14790 
   14791 		/* On a successful reset, possibly need to wait for the PHY
   14792 		 * to quiesce to an accessible state before returning control
    14793 		 * to the calling function.  If the PHY does not quiesce, then
    14794 		 * report that the PHY reset is blocked, as this is the
    14795 		 * condition that the PHY is in.
   14796 		 */
   14797 		if (wm_phy_resetisblocked(sc))
   14798 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14799 	}
   14800 
   14801 out:
   14802 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14803 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14804 		delay(10*1000);
   14805 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14806 	}
   14807 
    14808 	return rv;
   14809 }
   14810 
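          /*
           * Keep ARP handling in the driver while letting management
           * packets (ports 623/624 on 82571 and newer) reach the host.
           */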
   14811 static void
   14812 wm_init_manageability(struct wm_softc *sc)
   14813 {
   14814 
   14815 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14816 		device_xname(sc->sc_dev), __func__));
   14817 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14818 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14819 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14820 
   14821 		/* Disable hardware interception of ARP */
   14822 		manc &= ~MANC_ARP_EN;
   14823 
   14824 		/* Enable receiving management packets to the host */
   14825 		if (sc->sc_type >= WM_T_82571) {
   14826 			manc |= MANC_EN_MNG2HOST;
   14827 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14828 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14829 		}
   14830 
   14831 		CSR_WRITE(sc, WMREG_MANC, manc);
   14832 	}
   14833 }
   14834 
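          /*
           * Undo wm_init_manageability(): give ARP handling back to the
           * firmware and stop forwarding management packets to the host.
           */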
   14835 static void
   14836 wm_release_manageability(struct wm_softc *sc)
   14837 {
   14838 
   14839 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14840 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14841 
   14842 		manc |= MANC_ARP_EN;
   14843 		if (sc->sc_type >= WM_T_82571)
   14844 			manc &= ~MANC_EN_MNG2HOST;
   14845 
   14846 		CSR_WRITE(sc, WMREG_MANC, manc);
   14847 	}
   14848 }
   14849 
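          /*
           * Set the manageability related flags (WM_F_HAS_AMT,
           * WM_F_ARC_SUBSYS_VALID, WM_F_ASF_FIRMWARE_PRES and
           * WM_F_HAS_MANAGE) based on the chip type and the firmware state.
           */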
   14850 static void
   14851 wm_get_wakeup(struct wm_softc *sc)
   14852 {
   14853 
   14854 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14855 	switch (sc->sc_type) {
   14856 	case WM_T_82573:
   14857 	case WM_T_82583:
   14858 		sc->sc_flags |= WM_F_HAS_AMT;
   14859 		/* FALLTHROUGH */
   14860 	case WM_T_80003:
   14861 	case WM_T_82575:
   14862 	case WM_T_82576:
   14863 	case WM_T_82580:
   14864 	case WM_T_I350:
   14865 	case WM_T_I354:
   14866 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14867 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14868 		/* FALLTHROUGH */
   14869 	case WM_T_82541:
   14870 	case WM_T_82541_2:
   14871 	case WM_T_82547:
   14872 	case WM_T_82547_2:
   14873 	case WM_T_82571:
   14874 	case WM_T_82572:
   14875 	case WM_T_82574:
   14876 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14877 		break;
   14878 	case WM_T_ICH8:
   14879 	case WM_T_ICH9:
   14880 	case WM_T_ICH10:
   14881 	case WM_T_PCH:
   14882 	case WM_T_PCH2:
   14883 	case WM_T_PCH_LPT:
   14884 	case WM_T_PCH_SPT:
   14885 	case WM_T_PCH_CNP:
   14886 		sc->sc_flags |= WM_F_HAS_AMT;
   14887 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14888 		break;
   14889 	default:
   14890 		break;
   14891 	}
   14892 
   14893 	/* 1: HAS_MANAGE */
   14894 	if (wm_enable_mng_pass_thru(sc) != 0)
   14895 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14896 
   14897 	/*
    14898 	 * Note that the WOL flag is set after the EEPROM-related reset
    14899 	 * has been done.
   14900 	 */
   14901 }
   14902 
   14903 /*
   14904  * Unconfigure Ultra Low Power mode.
    14905  * Only for PCH_LPT and newer, excluding some I217/I218 devices (see below).
   14906  */
   14907 static int
   14908 wm_ulp_disable(struct wm_softc *sc)
   14909 {
   14910 	uint32_t reg;
   14911 	uint16_t phyreg;
   14912 	int i = 0, rv = 0;
   14913 
   14914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14915 		device_xname(sc->sc_dev), __func__));
   14916 	/* Exclude old devices */
   14917 	if ((sc->sc_type < WM_T_PCH_LPT)
   14918 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14919 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14920 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14921 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14922 		return 0;
   14923 
   14924 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14925 		/* Request ME un-configure ULP mode in the PHY */
   14926 		reg = CSR_READ(sc, WMREG_H2ME);
   14927 		reg &= ~H2ME_ULP;
   14928 		reg |= H2ME_ENFORCE_SETTINGS;
   14929 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14930 
   14931 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14932 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14933 			if (i++ == 30) {
   14934 				device_printf(sc->sc_dev, "%s timed out\n",
   14935 				    __func__);
   14936 				return -1;
   14937 			}
   14938 			delay(10 * 1000);
   14939 		}
   14940 		reg = CSR_READ(sc, WMREG_H2ME);
   14941 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14942 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14943 
   14944 		return 0;
   14945 	}
   14946 
   14947 	/* Acquire semaphore */
   14948 	rv = sc->phy.acquire(sc);
   14949 	if (rv != 0) {
   14950 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14951 		device_xname(sc->sc_dev), __func__));
   14952 		return -1;
   14953 	}
   14954 
   14955 	/* Toggle LANPHYPC */
   14956 	wm_toggle_lanphypc_pch_lpt(sc);
   14957 
   14958 	/* Unforce SMBus mode in PHY */
   14959 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14960 	if (rv != 0) {
   14961 		uint32_t reg2;
   14962 
   14963 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14964 			__func__);
   14965 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14966 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14967 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14968 		delay(50 * 1000);
   14969 
   14970 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14971 		    &phyreg);
   14972 		if (rv != 0)
   14973 			goto release;
   14974 	}
   14975 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14976 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14977 
   14978 	/* Unforce SMBus mode in MAC */
   14979 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14980 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14981 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14982 
   14983 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14984 	if (rv != 0)
   14985 		goto release;
   14986 	phyreg |= HV_PM_CTRL_K1_ENA;
   14987 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14988 
   14989 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14990 		&phyreg);
   14991 	if (rv != 0)
   14992 		goto release;
   14993 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14994 	    | I218_ULP_CONFIG1_STICKY_ULP
   14995 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14996 	    | I218_ULP_CONFIG1_WOL_HOST
   14997 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14998 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14999 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15000 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15001 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15002 	phyreg |= I218_ULP_CONFIG1_START;
   15003 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15004 
   15005 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15006 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15007 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15008 
   15009 release:
   15010 	/* Release semaphore */
   15011 	sc->phy.release(sc);
   15012 	wm_gmii_reset(sc);
   15013 	delay(50 * 1000);
   15014 
   15015 	return rv;
   15016 }
   15017 
   15018 /* WOL in the newer chipset interfaces (pchlan) */
   15019 static int
   15020 wm_enable_phy_wakeup(struct wm_softc *sc)
   15021 {
   15022 	device_t dev = sc->sc_dev;
   15023 	uint32_t mreg, moff;
   15024 	uint16_t wuce, wuc, wufc, preg;
   15025 	int i, rv;
   15026 
   15027 	KASSERT(sc->sc_type >= WM_T_PCH);
   15028 
   15029 	/* Copy MAC RARs to PHY RARs */
   15030 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15031 
   15032 	/* Activate PHY wakeup */
   15033 	rv = sc->phy.acquire(sc);
   15034 	if (rv != 0) {
   15035 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15036 		    __func__);
   15037 		return rv;
   15038 	}
   15039 
   15040 	/*
   15041 	 * Enable access to PHY wakeup registers.
   15042 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15043 	 */
   15044 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15045 	if (rv != 0) {
   15046 		device_printf(dev,
   15047 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15048 		goto release;
   15049 	}
   15050 
   15051 	/* Copy MAC MTA to PHY MTA */
   15052 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15053 		uint16_t lo, hi;
   15054 
   15055 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15056 		lo = (uint16_t)(mreg & 0xffff);
   15057 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15058 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15059 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15060 	}
   15061 
   15062 	/* Configure PHY Rx Control register */
   15063 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15064 	mreg = CSR_READ(sc, WMREG_RCTL);
   15065 	if (mreg & RCTL_UPE)
   15066 		preg |= BM_RCTL_UPE;
   15067 	if (mreg & RCTL_MPE)
   15068 		preg |= BM_RCTL_MPE;
   15069 	preg &= ~(BM_RCTL_MO_MASK);
   15070 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15071 	if (moff != 0)
   15072 		preg |= moff << BM_RCTL_MO_SHIFT;
   15073 	if (mreg & RCTL_BAM)
   15074 		preg |= BM_RCTL_BAM;
   15075 	if (mreg & RCTL_PMCF)
   15076 		preg |= BM_RCTL_PMCF;
   15077 	mreg = CSR_READ(sc, WMREG_CTRL);
   15078 	if (mreg & CTRL_RFCE)
   15079 		preg |= BM_RCTL_RFCE;
   15080 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15081 
   15082 	wuc = WUC_APME | WUC_PME_EN;
   15083 	wufc = WUFC_MAG;
   15084 	/* Enable PHY wakeup in MAC register */
   15085 	CSR_WRITE(sc, WMREG_WUC,
   15086 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15087 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15088 
   15089 	/* Configure and enable PHY wakeup in PHY registers */
   15090 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15091 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15092 
   15093 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15094 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15095 
   15096 release:
   15097 	sc->phy.release(sc);
   15098 
    15099 	return rv;
   15100 }
   15101 
   15102 /* Power down workaround on D3 */
   15103 static void
   15104 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15105 {
   15106 	uint32_t reg;
   15107 	uint16_t phyreg;
   15108 	int i;
   15109 
   15110 	for (i = 0; i < 2; i++) {
   15111 		/* Disable link */
   15112 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15113 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15114 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15115 
   15116 		/*
   15117 		 * Call gig speed drop workaround on Gig disable before
   15118 		 * accessing any PHY registers
   15119 		 */
   15120 		if (sc->sc_type == WM_T_ICH8)
   15121 			wm_gig_downshift_workaround_ich8lan(sc);
   15122 
   15123 		/* Write VR power-down enable */
   15124 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15125 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15126 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15127 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15128 
   15129 		/* Read it back and test */
   15130 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15131 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15132 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15133 			break;
   15134 
   15135 		/* Issue PHY reset and repeat at most one more time */
   15136 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15137 	}
   15138 }
   15139 
   15140 /*
   15141  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15142  *  @sc: pointer to the HW structure
   15143  *
   15144  *  During S0 to Sx transition, it is possible the link remains at gig
   15145  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15146  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15147  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15148  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15149  *  needs to be written.
    15150  *  Parts that support (and are linked to a partner that supports) EEE in
   15151  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15152  *  than 10Mbps w/o EEE.
   15153  */
   15154 static void
   15155 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15156 {
   15157 	device_t dev = sc->sc_dev;
   15158 	struct ethercom *ec = &sc->sc_ethercom;
   15159 	uint32_t phy_ctrl;
   15160 	int rv;
   15161 
   15162 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15163 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15164 
   15165 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15166 
   15167 	if (sc->sc_phytype == WMPHY_I217) {
   15168 		uint16_t devid = sc->sc_pcidevid;
   15169 
   15170 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15171 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15172 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15173 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15174 		    (sc->sc_type >= WM_T_PCH_SPT))
   15175 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15176 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15177 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15178 
   15179 		if (sc->phy.acquire(sc) != 0)
   15180 			goto out;
   15181 
   15182 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15183 			uint16_t eee_advert;
   15184 
   15185 			rv = wm_read_emi_reg_locked(dev,
   15186 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15187 			if (rv)
   15188 				goto release;
   15189 
   15190 			/*
   15191 			 * Disable LPLU if both link partners support 100BaseT
   15192 			 * EEE and 100Full is advertised on both ends of the
   15193 			 * link, and enable Auto Enable LPI since there will
   15194 			 * be no driver to enable LPI while in Sx.
   15195 			 */
   15196 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15197 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15198 				uint16_t anar, phy_reg;
   15199 
   15200 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15201 				    &anar);
   15202 				if (anar & ANAR_TX_FD) {
   15203 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15204 					    PHY_CTRL_NOND0A_LPLU);
   15205 
   15206 					/* Set Auto Enable LPI after link up */
   15207 					sc->phy.readreg_locked(dev, 2,
   15208 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15209 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15210 					sc->phy.writereg_locked(dev, 2,
   15211 					    I217_LPI_GPIO_CTRL, phy_reg);
   15212 				}
   15213 			}
   15214 		}
   15215 
   15216 		/*
   15217 		 * For i217 Intel Rapid Start Technology support,
   15218 		 * when the system is going into Sx and no manageability engine
   15219 		 * is present, the driver must configure proxy to reset only on
   15220 		 * power good.	LPI (Low Power Idle) state must also reset only
   15221 		 * on power good, as well as the MTA (Multicast table array).
   15222 		 * The SMBus release must also be disabled on LCD reset.
   15223 		 */
   15224 
   15225 		/*
   15226 		 * Enable MTA to reset for Intel Rapid Start Technology
   15227 		 * Support
   15228 		 */
   15229 
   15230 release:
   15231 		sc->phy.release(sc);
   15232 	}
   15233 out:
   15234 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15235 
   15236 	if (sc->sc_type == WM_T_ICH8)
   15237 		wm_gig_downshift_workaround_ich8lan(sc);
   15238 
   15239 	if (sc->sc_type >= WM_T_PCH) {
   15240 		wm_oem_bits_config_ich8lan(sc, false);
   15241 
   15242 		/* Reset PHY to activate OEM bits on 82577/8 */
   15243 		if (sc->sc_type == WM_T_PCH)
   15244 			wm_reset_phy(sc);
   15245 
   15246 		if (sc->phy.acquire(sc) != 0)
   15247 			return;
   15248 		wm_write_smbus_addr(sc);
   15249 		sc->phy.release(sc);
   15250 	}
   15251 }
   15252 
   15253 /*
   15254  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15255  *  @sc: pointer to the HW structure
   15256  *
   15257  *  During Sx to S0 transitions on non-managed devices or managed devices
   15258  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15259  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15260  *  the PHY.
   15261  *  On i217, setup Intel Rapid Start Technology.
   15262  */
   15263 static int
   15264 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15265 {
   15266 	device_t dev = sc->sc_dev;
   15267 	int rv;
   15268 
   15269 	if (sc->sc_type < WM_T_PCH2)
   15270 		return 0;
   15271 
   15272 	rv = wm_init_phy_workarounds_pchlan(sc);
   15273 	if (rv != 0)
   15274 		return -1;
   15275 
   15276 	/* For i217 Intel Rapid Start Technology support when the system
   15277 	 * is transitioning from Sx and no manageability engine is present
   15278 	 * configure SMBus to restore on reset, disable proxy, and enable
   15279 	 * the reset on MTA (Multicast table array).
   15280 	 */
   15281 	if (sc->sc_phytype == WMPHY_I217) {
   15282 		uint16_t phy_reg;
   15283 
   15284 		if (sc->phy.acquire(sc) != 0)
   15285 			return -1;
   15286 
   15287 		/* Clear Auto Enable LPI after link up */
   15288 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15289 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15290 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15291 
   15292 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15293 			/* Restore clear on SMB if no manageability engine
   15294 			 * is present
   15295 			 */
   15296 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15297 			    &phy_reg);
   15298 			if (rv != 0)
   15299 				goto release;
   15300 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15301 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15302 
   15303 			/* Disable Proxy */
   15304 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15305 		}
   15306 		/* Enable reset on MTA */
    15307 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15308 		if (rv != 0)
   15309 			goto release;
   15310 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15311 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15312 
   15313 release:
   15314 		sc->phy.release(sc);
   15315 		return rv;
   15316 	}
   15317 
   15318 	return 0;
   15319 }
   15320 
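          /*
           * Arm the hardware for wakeup on suspend: program the wakeup
           * filters (in the PHY on PCH and newer, otherwise in the MAC)
           * and set PME_EN in the PCI power management capability when
           * WOL is enabled.
           */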
   15321 static void
   15322 wm_enable_wakeup(struct wm_softc *sc)
   15323 {
   15324 	uint32_t reg, pmreg;
   15325 	pcireg_t pmode;
   15326 	int rv = 0;
   15327 
   15328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15329 		device_xname(sc->sc_dev), __func__));
   15330 
   15331 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15332 	    &pmreg, NULL) == 0)
   15333 		return;
   15334 
   15335 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15336 		goto pme;
   15337 
   15338 	/* Advertise the wakeup capability */
   15339 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15340 	    | CTRL_SWDPIN(3));
   15341 
   15342 	/* Keep the laser running on fiber adapters */
   15343 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15344 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15345 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15346 		reg |= CTRL_EXT_SWDPIN(3);
   15347 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15348 	}
   15349 
   15350 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15351 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15352 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15353 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15354 		wm_suspend_workarounds_ich8lan(sc);
   15355 
   15356 #if 0	/* For the multicast packet */
   15357 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15358 	reg |= WUFC_MC;
   15359 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15360 #endif
   15361 
   15362 	if (sc->sc_type >= WM_T_PCH) {
   15363 		rv = wm_enable_phy_wakeup(sc);
   15364 		if (rv != 0)
   15365 			goto pme;
   15366 	} else {
   15367 		/* Enable wakeup by the MAC */
   15368 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15369 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15370 	}
   15371 
   15372 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15373 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15374 		|| (sc->sc_type == WM_T_PCH2))
   15375 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15376 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15377 
   15378 pme:
   15379 	/* Request PME */
   15380 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15381 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15382 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15383 		/* For WOL */
   15384 		pmode |= PCI_PMCSR_PME_EN;
   15385 	} else {
   15386 		/* Disable WOL */
   15387 		pmode &= ~PCI_PMCSR_PME_EN;
   15388 	}
   15389 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15390 }
   15391 
   15392 /* Disable ASPM L0s and/or L1 for workaround */
   15393 static void
   15394 wm_disable_aspm(struct wm_softc *sc)
   15395 {
   15396 	pcireg_t reg, mask = 0;
    15397 	const char *str = "";
   15398 
   15399 	/*
    15400 	 * Only for PCIe devices that have the PCIe capability structure
    15401 	 * in their PCI config space.
   15402 	 */
   15403 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15404 		return;
   15405 
   15406 	switch (sc->sc_type) {
   15407 	case WM_T_82571:
   15408 	case WM_T_82572:
   15409 		/*
   15410 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15411 		 * State Power management L1 State (ASPM L1).
   15412 		 */
   15413 		mask = PCIE_LCSR_ASPM_L1;
   15414 		str = "L1 is";
   15415 		break;
   15416 	case WM_T_82573:
   15417 	case WM_T_82574:
   15418 	case WM_T_82583:
   15419 		/*
   15420 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15421 		 *
    15422 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15423 		 * some chipsets.  The documents for the 82574 and 82583 say
    15424 		 * that disabling L0s with specific chipsets is sufficient,
    15425 		 * but we follow what the Intel em driver does.
   15426 		 *
   15427 		 * References:
   15428 		 * Errata 8 of the Specification Update of i82573.
   15429 		 * Errata 20 of the Specification Update of i82574.
   15430 		 * Errata 9 of the Specification Update of i82583.
   15431 		 */
   15432 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15433 		str = "L0s and L1 are";
   15434 		break;
   15435 	default:
   15436 		return;
   15437 	}
   15438 
   15439 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15440 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15441 	reg &= ~mask;
   15442 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15443 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15444 
   15445 	/* Print only in wm_attach() */
   15446 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15447 		aprint_verbose_dev(sc->sc_dev,
   15448 		    "ASPM %s disabled to workaround the errata.\n", str);
   15449 }
   15450 
   15451 /* LPLU */
   15452 
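          /*
           * Disable D0 Low Power Link Up.  The register controlling LPLU
           * differs between chip generations (a PHY register, PHPM or
           * PHY_CTRL).
           */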
   15453 static void
   15454 wm_lplu_d0_disable(struct wm_softc *sc)
   15455 {
   15456 	struct mii_data *mii = &sc->sc_mii;
   15457 	uint32_t reg;
   15458 	uint16_t phyval;
   15459 
   15460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15461 		device_xname(sc->sc_dev), __func__));
   15462 
   15463 	if (sc->sc_phytype == WMPHY_IFE)
   15464 		return;
   15465 
   15466 	switch (sc->sc_type) {
   15467 	case WM_T_82571:
   15468 	case WM_T_82572:
   15469 	case WM_T_82573:
   15470 	case WM_T_82575:
   15471 	case WM_T_82576:
   15472 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15473 		phyval &= ~PMR_D0_LPLU;
   15474 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15475 		break;
   15476 	case WM_T_82580:
   15477 	case WM_T_I350:
   15478 	case WM_T_I210:
   15479 	case WM_T_I211:
   15480 		reg = CSR_READ(sc, WMREG_PHPM);
   15481 		reg &= ~PHPM_D0A_LPLU;
   15482 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15483 		break;
   15484 	case WM_T_82574:
   15485 	case WM_T_82583:
   15486 	case WM_T_ICH8:
   15487 	case WM_T_ICH9:
   15488 	case WM_T_ICH10:
   15489 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15490 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15491 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15492 		CSR_WRITE_FLUSH(sc);
   15493 		break;
   15494 	case WM_T_PCH:
   15495 	case WM_T_PCH2:
   15496 	case WM_T_PCH_LPT:
   15497 	case WM_T_PCH_SPT:
   15498 	case WM_T_PCH_CNP:
   15499 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15500 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15501 		if (wm_phy_resetisblocked(sc) == false)
   15502 			phyval |= HV_OEM_BITS_ANEGNOW;
   15503 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15504 		break;
   15505 	default:
   15506 		break;
   15507 	}
   15508 }
   15509 
   15510 /* EEE */
   15511 
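          /*
           * Enable or disable Energy Efficient Ethernet through the IPCNFG
           * and EEER registers, following the ETHERCAP_EEE setting.
           */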
   15512 static int
   15513 wm_set_eee_i350(struct wm_softc *sc)
   15514 {
   15515 	struct ethercom *ec = &sc->sc_ethercom;
   15516 	uint32_t ipcnfg, eeer;
   15517 	uint32_t ipcnfg_mask
   15518 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15519 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15520 
   15521 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15522 
   15523 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15524 	eeer = CSR_READ(sc, WMREG_EEER);
   15525 
   15526 	/* Enable or disable per user setting */
   15527 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15528 		ipcnfg |= ipcnfg_mask;
   15529 		eeer |= eeer_mask;
   15530 	} else {
   15531 		ipcnfg &= ~ipcnfg_mask;
   15532 		eeer &= ~eeer_mask;
   15533 	}
   15534 
   15535 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15536 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15537 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15538 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15539 
   15540 	return 0;
   15541 }
   15542 
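          /*
           * Configure EEE on 82579/I217 PHYs through the EMI registers:
           * enable LPI only for the speeds for which both link partners
           * advertise EEE.
           */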
   15543 static int
   15544 wm_set_eee_pchlan(struct wm_softc *sc)
   15545 {
   15546 	device_t dev = sc->sc_dev;
   15547 	struct ethercom *ec = &sc->sc_ethercom;
   15548 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15549 	int rv = 0;
   15550 
   15551 	switch (sc->sc_phytype) {
   15552 	case WMPHY_82579:
   15553 		lpa = I82579_EEE_LP_ABILITY;
   15554 		pcs_status = I82579_EEE_PCS_STATUS;
   15555 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15556 		break;
   15557 	case WMPHY_I217:
   15558 		lpa = I217_EEE_LP_ABILITY;
   15559 		pcs_status = I217_EEE_PCS_STATUS;
   15560 		adv_addr = I217_EEE_ADVERTISEMENT;
   15561 		break;
   15562 	default:
   15563 		return 0;
   15564 	}
   15565 
   15566 	if (sc->phy.acquire(sc)) {
   15567 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15568 		return 0;
   15569 	}
   15570 
   15571 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15572 	if (rv != 0)
   15573 		goto release;
   15574 
   15575 	/* Clear bits that enable EEE in various speeds */
   15576 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15577 
   15578 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15579 		/* Save off link partner's EEE ability */
   15580 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15581 		if (rv != 0)
   15582 			goto release;
   15583 
   15584 		/* Read EEE advertisement */
   15585 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15586 			goto release;
   15587 
   15588 		/*
   15589 		 * Enable EEE only for speeds in which the link partner is
   15590 		 * EEE capable and for which we advertise EEE.
   15591 		 */
   15592 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15593 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15594 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15595 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15596 			if ((data & ANLPAR_TX_FD) != 0)
   15597 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15598 			else {
   15599 				/*
   15600 				 * EEE is not supported in 100Half, so ignore
   15601 				 * partner's EEE in 100 ability if full-duplex
   15602 				 * is not advertised.
   15603 				 */
   15604 				sc->eee_lp_ability
   15605 				    &= ~AN_EEEADVERT_100_TX;
   15606 			}
   15607 		}
   15608 	}
   15609 
   15610 	if (sc->sc_phytype == WMPHY_82579) {
   15611 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15612 		if (rv != 0)
   15613 			goto release;
   15614 
   15615 		data &= ~I82579_LPI_PLL_SHUT_100;
   15616 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15617 	}
   15618 
   15619 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15620 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15621 		goto release;
   15622 
   15623 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15624 release:
   15625 	sc->phy.release(sc);
   15626 
   15627 	return rv;
   15628 }
   15629 
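          /* Dispatch to the chip specific EEE configuration function. */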
   15630 static int
   15631 wm_set_eee(struct wm_softc *sc)
   15632 {
   15633 	struct ethercom *ec = &sc->sc_ethercom;
   15634 
   15635 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15636 		return 0;
   15637 
   15638 	if (sc->sc_type == WM_T_I354) {
   15639 		/* I354 uses an external PHY */
   15640 		return 0; /* not yet */
   15641 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15642 		return wm_set_eee_i350(sc);
   15643 	else if (sc->sc_type >= WM_T_PCH2)
   15644 		return wm_set_eee_pchlan(sc);
   15645 
   15646 	return 0;
   15647 }
   15648 
   15649 /*
   15650  * Workarounds (mainly PHY related).
   15651  * Basically, PHY's workarounds are in the PHY drivers.
   15652  */
   15653 
   15654 /* Work-around for 82566 Kumeran PCS lock loss */
   15655 static int
   15656 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15657 {
   15658 	struct mii_data *mii = &sc->sc_mii;
   15659 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15660 	int i, reg, rv;
   15661 	uint16_t phyreg;
   15662 
   15663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15664 		device_xname(sc->sc_dev), __func__));
   15665 
   15666 	/* If the link is not up, do nothing */
   15667 	if ((status & STATUS_LU) == 0)
   15668 		return 0;
   15669 
    15670 	/* Nothing to do if the link speed is not 1Gbps */
   15671 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15672 		return 0;
   15673 
   15674 	for (i = 0; i < 10; i++) {
   15675 		/* read twice */
   15676 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15677 		if (rv != 0)
   15678 			return rv;
   15679 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15680 		if (rv != 0)
   15681 			return rv;
   15682 
   15683 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15684 			goto out;	/* GOOD! */
   15685 
   15686 		/* Reset the PHY */
   15687 		wm_reset_phy(sc);
   15688 		delay(5*1000);
   15689 	}
   15690 
   15691 	/* Disable GigE link negotiation */
   15692 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15693 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15694 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15695 
   15696 	/*
   15697 	 * Call gig speed drop workaround on Gig disable before accessing
   15698 	 * any PHY registers.
   15699 	 */
   15700 	wm_gig_downshift_workaround_ich8lan(sc);
   15701 
   15702 out:
   15703 	return 0;
   15704 }
   15705 
   15706 /*
   15707  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15708  *  @sc: pointer to the HW structure
   15709  *
   15710  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15711  *  LPLU, Gig disable, MDIC PHY reset):
   15712  *    1) Set Kumeran Near-end loopback
   15713  *    2) Clear Kumeran Near-end loopback
   15714  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15715  */
   15716 static void
   15717 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15718 {
   15719 	uint16_t kmreg;
   15720 
   15721 	/* Only for igp3 */
   15722 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15723 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15724 			return;
   15725 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15726 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15727 			return;
   15728 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15729 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15730 	}
   15731 }
   15732 
   15733 /*
   15734  * Workaround for pch's PHYs
   15735  * XXX should be moved to new PHY driver?
   15736  */
   15737 static int
   15738 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15739 {
   15740 	device_t dev = sc->sc_dev;
   15741 	struct mii_data *mii = &sc->sc_mii;
   15742 	struct mii_softc *child;
   15743 	uint16_t phy_data, phyrev = 0;
   15744 	int phytype = sc->sc_phytype;
   15745 	int rv;
   15746 
   15747 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15748 		device_xname(dev), __func__));
   15749 	KASSERT(sc->sc_type == WM_T_PCH);
   15750 
   15751 	/* Set MDIO slow mode before any other MDIO access */
   15752 	if (phytype == WMPHY_82577)
   15753 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15754 			return rv;
   15755 
   15756 	child = LIST_FIRST(&mii->mii_phys);
   15757 	if (child != NULL)
   15758 		phyrev = child->mii_mpd_rev;
   15759 
    15760 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15761 	if ((child != NULL) &&
   15762 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15763 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15764 		/* Disable generation of early preamble (0x4431) */
   15765 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15766 		    &phy_data);
   15767 		if (rv != 0)
   15768 			return rv;
   15769 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15770 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15771 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15772 		    phy_data);
   15773 		if (rv != 0)
   15774 			return rv;
   15775 
   15776 		/* Preamble tuning for SSC */
   15777 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15778 		if (rv != 0)
   15779 			return rv;
   15780 	}
   15781 
   15782 	/* 82578 */
   15783 	if (phytype == WMPHY_82578) {
   15784 		/*
   15785 		 * Return registers to default by doing a soft reset then
   15786 		 * writing 0x3140 to the control register
   15787 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15788 		 */
   15789 		if ((child != NULL) && (phyrev < 2)) {
   15790 			PHY_RESET(child);
   15791 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15792 			if (rv != 0)
   15793 				return rv;
   15794 		}
   15795 	}
   15796 
   15797 	/* Select page 0 */
   15798 	if ((rv = sc->phy.acquire(sc)) != 0)
   15799 		return rv;
   15800 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15801 	sc->phy.release(sc);
   15802 	if (rv != 0)
   15803 		return rv;
   15804 
   15805 	/*
   15806 	 * Configure the K1 Si workaround during phy reset assuming there is
   15807 	 * link so that it disables K1 if link is in 1Gbps.
   15808 	 */
   15809 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15810 		return rv;
   15811 
   15812 	/* Workaround for link disconnects on a busy hub in half duplex */
   15813 	rv = sc->phy.acquire(sc);
   15814 	if (rv)
   15815 		return rv;
   15816 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15817 	if (rv)
   15818 		goto release;
   15819 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15820 	    phy_data & 0x00ff);
   15821 	if (rv)
   15822 		goto release;
   15823 
   15824 	/* Set MSE higher to enable link to stay up when noise is high */
   15825 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15826 release:
   15827 	sc->phy.release(sc);
   15828 
   15829 	return rv;
   15830 }
   15831 
   15832 /*
   15833  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15834  *  @sc:   pointer to the HW structure
   15835  */
   15836 static void
   15837 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15838 {
   15839 	device_t dev = sc->sc_dev;
   15840 	uint32_t mac_reg;
   15841 	uint16_t i, wuce;
   15842 	int count;
   15843 
   15844 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15845 		device_xname(sc->sc_dev), __func__));
   15846 
   15847 	if (sc->phy.acquire(sc) != 0)
   15848 		return;
   15849 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15850 		goto release;
   15851 
   15852 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15853 	count = wm_rar_count(sc);
   15854 	for (i = 0; i < count; i++) {
   15855 		uint16_t lo, hi;
   15856 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15857 		lo = (uint16_t)(mac_reg & 0xffff);
   15858 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15859 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15860 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15861 
   15862 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15863 		lo = (uint16_t)(mac_reg & 0xffff);
   15864 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15865 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15866 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15867 	}
   15868 
   15869 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15870 
   15871 release:
   15872 	sc->phy.release(sc);
   15873 }
   15874 
   15875 /*
   15876  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15877  *  done after every PHY reset.
   15878  */
   15879 static int
   15880 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15881 {
   15882 	device_t dev = sc->sc_dev;
   15883 	int rv;
   15884 
   15885 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15886 		device_xname(dev), __func__));
   15887 	KASSERT(sc->sc_type == WM_T_PCH2);
   15888 
   15889 	/* Set MDIO slow mode before any other MDIO access */
   15890 	rv = wm_set_mdio_slow_mode_hv(sc);
   15891 	if (rv != 0)
   15892 		return rv;
   15893 
   15894 	rv = sc->phy.acquire(sc);
   15895 	if (rv != 0)
   15896 		return rv;
   15897 	/* Set MSE higher to enable link to stay up when noise is high */
   15898 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15899 	if (rv != 0)
   15900 		goto release;
   15901 	/* Drop link after 5 times MSE threshold was reached */
   15902 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15903 release:
   15904 	sc->phy.release(sc);
   15905 
   15906 	return rv;
   15907 }
   15908 
   15909 /**
   15910  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15911  *  @link: link up bool flag
   15912  *
   15913  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15914  *  preventing further DMA write requests.  Workaround the issue by disabling
    15915  *  the de-assertion of the clock request when in 1Gbps mode.
   15916  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15917  *  speeds in order to avoid Tx hangs.
   15918  **/
   15919 static int
   15920 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15921 {
   15922 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15923 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15924 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15925 	uint16_t phyreg;
   15926 
   15927 	if (link && (speed == STATUS_SPEED_1000)) {
    15928 		int rv = sc->phy.acquire(sc);
          		if (rv != 0)
          			return rv;
    15929 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15930 		    &phyreg);
   15931 		if (rv != 0)
   15932 			goto release;
   15933 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15934 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15935 		if (rv != 0)
   15936 			goto release;
   15937 		delay(20);
   15938 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15939 
          		/* Write the saved K1 configuration back */
    15940 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15941 		    phyreg);
   15942 release:
   15943 		sc->phy.release(sc);
   15944 		return rv;
   15945 	}
   15946 
   15947 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15948 
   15949 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15950 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15951 	    || !link
   15952 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15953 		goto update_fextnvm6;
   15954 
   15955 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15956 
   15957 	/* Clear link status transmit timeout */
   15958 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15959 	if (speed == STATUS_SPEED_100) {
   15960 		/* Set inband Tx timeout to 5x10us for 100Half */
   15961 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15962 
   15963 		/* Do not extend the K1 entry latency for 100Half */
   15964 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15965 	} else {
   15966 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15967 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15968 
   15969 		/* Extend the K1 entry latency for 10 Mbps */
   15970 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15971 	}
   15972 
   15973 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15974 
   15975 update_fextnvm6:
   15976 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15977 	return 0;
   15978 }
   15979 
   15980 /*
   15981  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15982  *  @sc:   pointer to the HW structure
   15983  *  @link: link up bool flag
   15984  *
   15985  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15986  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15987  *  If link is down, the function will restore the default K1 setting located
   15988  *  in the NVM.
   15989  */
   15990 static int
   15991 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15992 {
   15993 	int k1_enable = sc->sc_nvm_k1_enabled;
   15994 
   15995 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15996 		device_xname(sc->sc_dev), __func__));
   15997 
   15998 	if (sc->phy.acquire(sc) != 0)
   15999 		return -1;
   16000 
   16001 	if (link) {
   16002 		k1_enable = 0;
   16003 
   16004 		/* Link stall fix for link up */
   16005 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16006 		    0x0100);
   16007 	} else {
   16008 		/* Link stall fix for link down */
   16009 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16010 		    0x4100);
   16011 	}
   16012 
   16013 	wm_configure_k1_ich8lan(sc, k1_enable);
   16014 	sc->phy.release(sc);
   16015 
   16016 	return 0;
   16017 }
   16018 
   16019 /*
   16020  *  wm_k1_workaround_lv - K1 Si workaround
   16021  *  @sc:   pointer to the HW structure
   16022  *
    16023  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    16024  *  Disable K1 for 1000 and 100 speeds.
   16025  */
   16026 static int
   16027 wm_k1_workaround_lv(struct wm_softc *sc)
   16028 {
   16029 	uint32_t reg;
   16030 	uint16_t phyreg;
   16031 	int rv;
   16032 
   16033 	if (sc->sc_type != WM_T_PCH2)
   16034 		return 0;
   16035 
   16036 	/* Set K1 beacon duration based on 10Mbps speed */
   16037 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16038 	if (rv != 0)
   16039 		return rv;
   16040 
   16041 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16042 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16043 		if (phyreg &
   16044 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    16045 			/* LV 1Gbps/100Mbps packet drop issue workaround */
   16046 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16047 			    &phyreg);
   16048 			if (rv != 0)
   16049 				return rv;
   16050 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16051 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16052 			    phyreg);
   16053 			if (rv != 0)
   16054 				return rv;
   16055 		} else {
   16056 			/* For 10Mbps */
   16057 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16058 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16059 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16060 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16061 		}
   16062 	}
   16063 
   16064 	return 0;
   16065 }
   16066 
   16067 /*
   16068  *  wm_link_stall_workaround_hv - Si workaround
   16069  *  @sc: pointer to the HW structure
   16070  *
   16071  *  This function works around a Si bug where the link partner can get
   16072  *  a link up indication before the PHY does. If small packets are sent
   16073  *  by the link partner they can be placed in the packet buffer without
   16074  *  being properly accounted for by the PHY and will stall preventing
   16075  *  further packets from being received.  The workaround is to clear the
   16076  *  packet buffer after the PHY detects link up.
   16077  */
   16078 static int
   16079 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16080 {
   16081 	uint16_t phyreg;
   16082 
   16083 	if (sc->sc_phytype != WMPHY_82578)
   16084 		return 0;
   16085 
    16086 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16087 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16088 	if ((phyreg & BMCR_LOOP) != 0)
   16089 		return 0;
   16090 
   16091 	/* Check if link is up and at 1Gbps */
   16092 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16093 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16094 	    | BM_CS_STATUS_SPEED_MASK;
   16095 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16096 		| BM_CS_STATUS_SPEED_1000))
   16097 		return 0;
   16098 
   16099 	delay(200 * 1000);	/* XXX too big */
   16100 
   16101 	/* Flush the packets in the fifo buffer */
   16102 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16103 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16104 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16105 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16106 
   16107 	return 0;
   16108 }
   16109 
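          /* Set the MDIO slow mode bit in the PHY's KMRN mode control reg. */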
   16110 static int
   16111 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16112 {
   16113 	int rv;
   16114 	uint16_t reg;
   16115 
   16116 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16117 	if (rv != 0)
   16118 		return rv;
   16119 
   16120 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16121 	    reg | HV_KMRN_MDIO_SLOW);
   16122 }
   16123 
   16124 /*
   16125  *  wm_configure_k1_ich8lan - Configure K1 power state
   16126  *  @sc: pointer to the HW structure
   16127  *  @enable: K1 state to configure
   16128  *
   16129  *  Configure the K1 power state based on the provided parameter.
   16130  *  Assumes semaphore already acquired.
   16131  */
   16132 static void
   16133 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16134 {
   16135 	uint32_t ctrl, ctrl_ext, tmp;
   16136 	uint16_t kmreg;
   16137 	int rv;
   16138 
   16139 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16140 
   16141 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16142 	if (rv != 0)
   16143 		return;
   16144 
   16145 	if (k1_enable)
   16146 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16147 	else
   16148 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16149 
   16150 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16151 	if (rv != 0)
   16152 		return;
   16153 
   16154 	delay(20);
   16155 
   16156 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16157 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16158 
   16159 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16160 	tmp |= CTRL_FRCSPD;
   16161 
   16162 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16163 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16164 	CSR_WRITE_FLUSH(sc);
   16165 	delay(20);
   16166 
   16167 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16168 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16169 	CSR_WRITE_FLUSH(sc);
   16170 	delay(20);
   16171 
   16172 	return;
   16173 }
   16174 
/* Special case - the 82575 needs to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM,
	 * same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

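/*
 *  wm_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 *  @sc: pointer to the HW structure
 *
 *  Re-set the MDICNFG.Destination and MDICNFG.Com_MDIO bits from the
 *  CFG3 word in the NVM; only needed on 82580 ports using SGMII.
 */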
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

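/*
 *  wm_phy_is_accessible_pchlan - Check PHY accessibility over MDIO
 *  @sc: pointer to the HW structure
 *
 *  Returns true if the PHY ID registers can be read, retrying in slow
 *  MDIO mode on pre-PCH_LPT devices.  Assumes the MDIO software
 *  ownership semaphore has already been acquired.
 */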
static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to get the PHY ID again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

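/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to bring it back to an accessible state, waiting afterwards
 *  for the PHY configuration to complete.
 */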
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

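/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: whether the link is currently up
 *
 *  Set the Latency Tolerance Reporting (LTR) values and, on link up,
 *  the OBFF high water mark, based on the Rx packet buffer size and
 *  the current link speed.
 */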
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
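		/*
		 * The latency computed below is the time (in ns) to drain
		 * the Rx packet buffer, less two maximum-sized frames, at
		 * the current link speed (bits * 1000 / speed-in-Mb/s
		 * yields ns).  As an illustrative example: lat_ns = 66000
		 * does not fit in 10 bits, so the loop below selects
		 * scale 2 (units of 2^10 ns), rounding up with howmany()
		 * to value 65, which encodes a tolerance of
		 * 65 * 2^10 = 66560 ns >= lat_ns.
		 */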
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

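		/*
		 * Convert the tolerated latency back into the amount of
		 * Rx buffer (in KB) that would fill while waiting that
		 * long at the current link speed; the OBFF high water
		 * mark is whatever remains of the Rx buffer allocation.
		 */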
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies are set to the same value */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that on NetBSD this function is called in both the FLASH and
 * iNVM cases.
 */
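/*
 * In outline: if the internal PHY reports its PLL as unconfigured
 * (GS40G_PHY_PLL_UNCONF), reset the PHY, arrange for the next NVM
 * autoload to use a modified Initialization Control Word (via
 * WMREG_EEARBC_I210), and bounce the device through D3hot, retrying
 * up to WM_MAX_PLL_TRIES times.
 */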
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

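/*
 *  wm_legacy_irq_quirk_spt - Quirk for legacy (INTx) interrupts
 *  @sc: pointer to the HW structure
 *
 *  Ungate the side clock and disable IOSF sideband clock gating and
 *  clock requests on PCH_SPT/PCH_CNP; without this, legacy interrupts
 *  may not be delivered reliably.
 */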
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}